problem_id (stringlengths 18-22) | source (stringclasses, 1 value) | task_type (stringclasses, 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
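Since each row's `prompt` and `verification_info` cells run to thousands of characters, the records are far easier to inspect programmatically than in this table. Below is a minimal sketch of loading one record with the `datasets` library and unpacking its fields; the dataset ID (`rasdani/github-patches`, borrowed from the `source` column) and the `train` split name are assumptions and may need adjusting to match wherever this data is actually hosted.

```python
import json

from datasets import load_dataset

# Assumed dataset ID (mirrors the `source` column) and split name -- adjust as needed.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["task_type"])

# `prompt` holds the issue text plus the relevant source files;
# `golden_diff` is the reference patch in `git diff` format.
print(row["prompt"][:500])
print(row["golden_diff"])

# `verification_info` is a JSON-encoded string carrying the golden diff, the issue
# text, and the full before/after file contents used to check candidate patches.
info = json.loads(row["verification_info"])
print([f["path"] for f in info["before_files"]])
```

In the rows shown here, `golden_diff` also appears to be duplicated inside `verification_info`, so the latter is self-contained for verification purposes.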
gh_patches_debug_6731 | rasdani/github-patches | git_diff | Bitmessage__PyBitmessage-1334 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Crashing on Boot
$ python src/bitmessagemain.py
Loading existing config files from /home/.config/PyBitmessage/
2018-08-05 13:19:02,170 - WARNING - Using default logger configuration
2018-08-05 13:19:02,592 - CRITICAL - Unhandled exception
Traceback (most recent call last):
File "src/bitmessagemain.py", line 507, in <module>
File "src/bitmessagemain.py", line 503, in main
File "src/bitmessagemain.py", line 276, in start
File "PyBitmessage/src/knownnodes.py", line 100, in readKnownNodes
pickle_deserialize_old_knownnodes(source)
File "PyBitmessage/src/knownnodes.py", line 63, in pickle_deserialize_old_knownnodes
knownNodes = pickle.load(source)
File "/usr/lib/python2.7/pickle.py", line 1384, in load
return Unpickler(file).load()
File "/usr/lib/python2.7/pickle.py", line 864, in load
dispatch[key](self)
File "/usr/lib/python2.7/pickle.py", line 886, in load_eof
raise EOFError
EOFError
Cleaning up lockfile
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/knownnodes.py`
Content:
```
1 import json
2 import os
3 import pickle
4 # import sys
5 import threading
6 import time
7
8 import state
9 from bmconfigparser import BMConfigParser
10 from debug import logger
11
12 knownNodesLock = threading.Lock()
13 knownNodes = {stream: {} for stream in range(1, 4)}
14
15 knownNodesTrimAmount = 2000
16
17 # forget a node after rating is this low
18 knownNodesForgetRating = -0.5
19
20 DEFAULT_NODES = (
21 state.Peer('5.45.99.75', 8444),
22 state.Peer('75.167.159.54', 8444),
23 state.Peer('95.165.168.168', 8444),
24 state.Peer('85.180.139.241', 8444),
25 state.Peer('158.222.217.190', 8080),
26 state.Peer('178.62.12.187', 8448),
27 state.Peer('24.188.198.204', 8111),
28 state.Peer('109.147.204.113', 1195),
29 state.Peer('178.11.46.221', 8444)
30 )
31
32
33 def json_serialize_knownnodes(output):
34 """
35 Reorganize knownnodes dict and write it as JSON to output
36 """
37 _serialized = []
38 for stream, peers in knownNodes.iteritems():
39 for peer, info in peers.iteritems():
40 info.update(rating=round(info.get('rating', 0), 2))
41 _serialized.append({
42 'stream': stream, 'peer': peer._asdict(), 'info': info
43 })
44 json.dump(_serialized, output, indent=4)
45
46
47 def json_deserialize_knownnodes(source):
48 """
49 Read JSON from source and make knownnodes dict
50 """
51 for node in json.load(source):
52 peer = node['peer']
53 peer['host'] = str(peer['host'])
54 knownNodes[node['stream']][state.Peer(**peer)] = node['info']
55
56
57 def pickle_deserialize_old_knownnodes(source):
58 """
59 Unpickle source and reorganize knownnodes dict if it's in old format
60 the old format was {Peer:lastseen, ...}
61 the new format is {Peer:{"lastseen":i, "rating":f}}
62 """
63 knownNodes = pickle.load(source)
64 for stream in knownNodes.keys():
65 for node, params in knownNodes[stream].items():
66 if isinstance(params, (float, int)):
67 addKnownNode(stream, node, params)
68
69
70 def saveKnownNodes(dirName=None):
71 if dirName is None:
72 dirName = state.appdata
73 with knownNodesLock:
74 with open(os.path.join(dirName, 'knownnodes.dat'), 'wb') as output:
75 json_serialize_knownnodes(output)
76
77
78 def addKnownNode(stream, peer, lastseen=None, is_self=False):
79 knownNodes[stream][peer] = {
80 "lastseen": lastseen or time.time(),
81 "rating": 0,
82 "self": is_self,
83 }
84
85
86 def createDefaultKnownNodes():
87 for peer in DEFAULT_NODES:
88 addKnownNode(1, peer)
89 saveKnownNodes()
90
91
92 def readKnownNodes():
93 try:
94 with open(state.appdata + 'knownnodes.dat', 'rb') as source:
95 with knownNodesLock:
96 try:
97 json_deserialize_knownnodes(source)
98 except ValueError:
99 source.seek(0)
100 pickle_deserialize_old_knownnodes(source)
101 except (IOError, OSError, KeyError):
102 logger.debug(
103 'Failed to read nodes from knownnodes.dat', exc_info=True)
104 createDefaultKnownNodes()
105
106 config = BMConfigParser()
107 # if config.safeGetInt('bitmessagesettings', 'settingsversion') > 10:
108 # sys.exit(
109 # 'Bitmessage cannot read future versions of the keys file'
110 # ' (keys.dat). Run the newer version of Bitmessage.')
111
112 # your own onion address, if setup
113 onionhostname = config.safeGet('bitmessagesettings', 'onionhostname')
114 if onionhostname and ".onion" in onionhostname:
115 onionport = config.safeGetInt('bitmessagesettings', 'onionport')
116 if onionport:
117 addKnownNode(1, state.Peer(onionhostname, onionport), is_self=True)
118
119
120 def increaseRating(peer):
121 increaseAmount = 0.1
122 maxRating = 1
123 with knownNodesLock:
124 for stream in knownNodes.keys():
125 try:
126 knownNodes[stream][peer]["rating"] = min(
127 knownNodes[stream][peer]["rating"] + increaseAmount,
128 maxRating
129 )
130 except KeyError:
131 pass
132
133
134 def decreaseRating(peer):
135 decreaseAmount = 0.1
136 minRating = -1
137 with knownNodesLock:
138 for stream in knownNodes.keys():
139 try:
140 knownNodes[stream][peer]["rating"] = max(
141 knownNodes[stream][peer]["rating"] - decreaseAmount,
142 minRating
143 )
144 except KeyError:
145 pass
146
147
148 def trimKnownNodes(recAddrStream=1):
149 if len(knownNodes[recAddrStream]) < \
150 BMConfigParser().safeGetInt("knownnodes", "maxnodes"):
151 return
152 with knownNodesLock:
153 oldestList = sorted(
154 knownNodes[recAddrStream],
155 key=lambda x: x['lastseen']
156 )[:knownNodesTrimAmount]
157 for oldest in oldestList:
158 del knownNodes[recAddrStream][oldest]
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/knownnodes.py b/src/knownnodes.py
--- a/src/knownnodes.py
+++ b/src/knownnodes.py
@@ -98,7 +98,7 @@
except ValueError:
source.seek(0)
pickle_deserialize_old_knownnodes(source)
- except (IOError, OSError, KeyError):
+ except (IOError, OSError, KeyError, EOFError):
logger.debug(
'Failed to read nodes from knownnodes.dat', exc_info=True)
createDefaultKnownNodes()
| {"golden_diff": "diff --git a/src/knownnodes.py b/src/knownnodes.py\n--- a/src/knownnodes.py\n+++ b/src/knownnodes.py\n@@ -98,7 +98,7 @@\n except ValueError:\n source.seek(0)\n pickle_deserialize_old_knownnodes(source)\n- except (IOError, OSError, KeyError):\n+ except (IOError, OSError, KeyError, EOFError):\n logger.debug(\n 'Failed to read nodes from knownnodes.dat', exc_info=True)\n createDefaultKnownNodes()\n", "issue": "Crashing on Boot\n$ python src/bitmessagemain.py \r\nLoading existing config files from /home/.config/PyBitmessage/\r\n2018-08-05 13:19:02,170 - WARNING - Using default logger configuration\r\n2018-08-05 13:19:02,592 - CRITICAL - Unhandled exception\r\nTraceback (most recent call last):\r\n File \"src/bitmessagemain.py\", line 507, in <module>\r\n File \"src/bitmessagemain.py\", line 503, in main\r\n File \"src/bitmessagemain.py\", line 276, in start\r\n File \"PyBitmessage/src/knownnodes.py\", line 100, in readKnownNodes\r\n pickle_deserialize_old_knownnodes(source)\r\n File \"PyBitmessage/src/knownnodes.py\", line 63, in pickle_deserialize_old_knownnodes\r\n knownNodes = pickle.load(source)\r\n File \"/usr/lib/python2.7/pickle.py\", line 1384, in load\r\n return Unpickler(file).load()\r\n File \"/usr/lib/python2.7/pickle.py\", line 864, in load\r\n dispatch[key](self)\r\n File \"/usr/lib/python2.7/pickle.py\", line 886, in load_eof\r\n raise EOFError\r\nEOFError\r\nCleaning up lockfile\r\n\n", "before_files": [{"content": "import json\nimport os\nimport pickle\n# import sys\nimport threading\nimport time\n\nimport state\nfrom bmconfigparser import BMConfigParser\nfrom debug import logger\n\nknownNodesLock = threading.Lock()\nknownNodes = {stream: {} for stream in range(1, 4)}\n\nknownNodesTrimAmount = 2000\n\n# forget a node after rating is this low\nknownNodesForgetRating = -0.5\n\nDEFAULT_NODES = (\n state.Peer('5.45.99.75', 8444),\n state.Peer('75.167.159.54', 8444),\n state.Peer('95.165.168.168', 8444),\n state.Peer('85.180.139.241', 8444),\n state.Peer('158.222.217.190', 8080),\n state.Peer('178.62.12.187', 8448),\n state.Peer('24.188.198.204', 8111),\n state.Peer('109.147.204.113', 1195),\n state.Peer('178.11.46.221', 8444)\n)\n\n\ndef json_serialize_knownnodes(output):\n \"\"\"\n Reorganize knownnodes dict and write it as JSON to output\n \"\"\"\n _serialized = []\n for stream, peers in knownNodes.iteritems():\n for peer, info in peers.iteritems():\n info.update(rating=round(info.get('rating', 0), 2))\n _serialized.append({\n 'stream': stream, 'peer': peer._asdict(), 'info': info\n })\n json.dump(_serialized, output, indent=4)\n\n\ndef json_deserialize_knownnodes(source):\n \"\"\"\n Read JSON from source and make knownnodes dict\n \"\"\"\n for node in json.load(source):\n peer = node['peer']\n peer['host'] = str(peer['host'])\n knownNodes[node['stream']][state.Peer(**peer)] = node['info']\n\n\ndef pickle_deserialize_old_knownnodes(source):\n \"\"\"\n Unpickle source and reorganize knownnodes dict if it's in old format\n the old format was {Peer:lastseen, ...}\n the new format is {Peer:{\"lastseen\":i, \"rating\":f}}\n \"\"\"\n knownNodes = pickle.load(source)\n for stream in knownNodes.keys():\n for node, params in knownNodes[stream].items():\n if isinstance(params, (float, int)):\n addKnownNode(stream, node, params)\n\n\ndef saveKnownNodes(dirName=None):\n if dirName is None:\n dirName = state.appdata\n with knownNodesLock:\n with open(os.path.join(dirName, 'knownnodes.dat'), 'wb') as output:\n json_serialize_knownnodes(output)\n\n\ndef addKnownNode(stream, 
peer, lastseen=None, is_self=False):\n knownNodes[stream][peer] = {\n \"lastseen\": lastseen or time.time(),\n \"rating\": 0,\n \"self\": is_self,\n }\n\n\ndef createDefaultKnownNodes():\n for peer in DEFAULT_NODES:\n addKnownNode(1, peer)\n saveKnownNodes()\n\n\ndef readKnownNodes():\n try:\n with open(state.appdata + 'knownnodes.dat', 'rb') as source:\n with knownNodesLock:\n try:\n json_deserialize_knownnodes(source)\n except ValueError:\n source.seek(0)\n pickle_deserialize_old_knownnodes(source)\n except (IOError, OSError, KeyError):\n logger.debug(\n 'Failed to read nodes from knownnodes.dat', exc_info=True)\n createDefaultKnownNodes()\n\n config = BMConfigParser()\n # if config.safeGetInt('bitmessagesettings', 'settingsversion') > 10:\n # sys.exit(\n # 'Bitmessage cannot read future versions of the keys file'\n # ' (keys.dat). Run the newer version of Bitmessage.')\n\n # your own onion address, if setup\n onionhostname = config.safeGet('bitmessagesettings', 'onionhostname')\n if onionhostname and \".onion\" in onionhostname:\n onionport = config.safeGetInt('bitmessagesettings', 'onionport')\n if onionport:\n addKnownNode(1, state.Peer(onionhostname, onionport), is_self=True)\n\n\ndef increaseRating(peer):\n increaseAmount = 0.1\n maxRating = 1\n with knownNodesLock:\n for stream in knownNodes.keys():\n try:\n knownNodes[stream][peer][\"rating\"] = min(\n knownNodes[stream][peer][\"rating\"] + increaseAmount,\n maxRating\n )\n except KeyError:\n pass\n\n\ndef decreaseRating(peer):\n decreaseAmount = 0.1\n minRating = -1\n with knownNodesLock:\n for stream in knownNodes.keys():\n try:\n knownNodes[stream][peer][\"rating\"] = max(\n knownNodes[stream][peer][\"rating\"] - decreaseAmount,\n minRating\n )\n except KeyError:\n pass\n\n\ndef trimKnownNodes(recAddrStream=1):\n if len(knownNodes[recAddrStream]) < \\\n BMConfigParser().safeGetInt(\"knownnodes\", \"maxnodes\"):\n return\n with knownNodesLock:\n oldestList = sorted(\n knownNodes[recAddrStream],\n key=lambda x: x['lastseen']\n )[:knownNodesTrimAmount]\n for oldest in oldestList:\n del knownNodes[recAddrStream][oldest]\n", "path": "src/knownnodes.py"}], "after_files": [{"content": "import json\nimport os\nimport pickle\n# import sys\nimport threading\nimport time\n\nimport state\nfrom bmconfigparser import BMConfigParser\nfrom debug import logger\n\nknownNodesLock = threading.Lock()\nknownNodes = {stream: {} for stream in range(1, 4)}\n\nknownNodesTrimAmount = 2000\n\n# forget a node after rating is this low\nknownNodesForgetRating = -0.5\n\nDEFAULT_NODES = (\n state.Peer('5.45.99.75', 8444),\n state.Peer('75.167.159.54', 8444),\n state.Peer('95.165.168.168', 8444),\n state.Peer('85.180.139.241', 8444),\n state.Peer('158.222.217.190', 8080),\n state.Peer('178.62.12.187', 8448),\n state.Peer('24.188.198.204', 8111),\n state.Peer('109.147.204.113', 1195),\n state.Peer('178.11.46.221', 8444)\n)\n\n\ndef json_serialize_knownnodes(output):\n \"\"\"\n Reorganize knownnodes dict and write it as JSON to output\n \"\"\"\n _serialized = []\n for stream, peers in knownNodes.iteritems():\n for peer, info in peers.iteritems():\n info.update(rating=round(info.get('rating', 0), 2))\n _serialized.append({\n 'stream': stream, 'peer': peer._asdict(), 'info': info\n })\n json.dump(_serialized, output, indent=4)\n\n\ndef json_deserialize_knownnodes(source):\n \"\"\"\n Read JSON from source and make knownnodes dict\n \"\"\"\n for node in json.load(source):\n peer = node['peer']\n peer['host'] = str(peer['host'])\n 
knownNodes[node['stream']][state.Peer(**peer)] = node['info']\n\n\ndef pickle_deserialize_old_knownnodes(source):\n \"\"\"\n Unpickle source and reorganize knownnodes dict if it's in old format\n the old format was {Peer:lastseen, ...}\n the new format is {Peer:{\"lastseen\":i, \"rating\":f}}\n \"\"\"\n knownNodes = pickle.load(source)\n for stream in knownNodes.keys():\n for node, params in knownNodes[stream].items():\n if isinstance(params, (float, int)):\n addKnownNode(stream, node, params)\n\n\ndef saveKnownNodes(dirName=None):\n if dirName is None:\n dirName = state.appdata\n with knownNodesLock:\n with open(os.path.join(dirName, 'knownnodes.dat'), 'wb') as output:\n json_serialize_knownnodes(output)\n\n\ndef addKnownNode(stream, peer, lastseen=None, is_self=False):\n knownNodes[stream][peer] = {\n \"lastseen\": lastseen or time.time(),\n \"rating\": 0,\n \"self\": is_self,\n }\n\n\ndef createDefaultKnownNodes():\n for peer in DEFAULT_NODES:\n addKnownNode(1, peer)\n saveKnownNodes()\n\n\ndef readKnownNodes():\n try:\n with open(state.appdata + 'knownnodes.dat', 'rb') as source:\n with knownNodesLock:\n try:\n json_deserialize_knownnodes(source)\n except ValueError:\n source.seek(0)\n pickle_deserialize_old_knownnodes(source)\n except (IOError, OSError, KeyError, EOFError):\n logger.debug(\n 'Failed to read nodes from knownnodes.dat', exc_info=True)\n createDefaultKnownNodes()\n\n config = BMConfigParser()\n # if config.safeGetInt('bitmessagesettings', 'settingsversion') > 10:\n # sys.exit(\n # 'Bitmessage cannot read future versions of the keys file'\n # ' (keys.dat). Run the newer version of Bitmessage.')\n\n # your own onion address, if setup\n onionhostname = config.safeGet('bitmessagesettings', 'onionhostname')\n if onionhostname and \".onion\" in onionhostname:\n onionport = config.safeGetInt('bitmessagesettings', 'onionport')\n if onionport:\n addKnownNode(1, state.Peer(onionhostname, onionport), is_self=True)\n\n\ndef increaseRating(peer):\n increaseAmount = 0.1\n maxRating = 1\n with knownNodesLock:\n for stream in knownNodes.keys():\n try:\n knownNodes[stream][peer][\"rating\"] = min(\n knownNodes[stream][peer][\"rating\"] + increaseAmount,\n maxRating\n )\n except KeyError:\n pass\n\n\ndef decreaseRating(peer):\n decreaseAmount = 0.1\n minRating = -1\n with knownNodesLock:\n for stream in knownNodes.keys():\n try:\n knownNodes[stream][peer][\"rating\"] = max(\n knownNodes[stream][peer][\"rating\"] - decreaseAmount,\n minRating\n )\n except KeyError:\n pass\n\n\ndef trimKnownNodes(recAddrStream=1):\n if len(knownNodes[recAddrStream]) < \\\n BMConfigParser().safeGetInt(\"knownnodes\", \"maxnodes\"):\n return\n with knownNodesLock:\n oldestList = sorted(\n knownNodes[recAddrStream],\n key=lambda x: x['lastseen']\n )[:knownNodesTrimAmount]\n for oldest in oldestList:\n del knownNodes[recAddrStream][oldest]\n", "path": "src/knownnodes.py"}]} | 2,217 | 113 |
gh_patches_debug_31249 | rasdani/github-patches | git_diff | scikit-hep__pyhf-2278 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation of default Minuit strategy choice
### Summary
The documentation of `minuit_optimizer` lists `strategy` as a possible kwarg with default value of `None`, but it does not explain what that choice leads to. `pyhf` uses `0`/`1` depending on whether user-provided gradients are used (#1172 / #1183), and I believe it would be good to have that documented on the optimizer page. I'd be happy to submit a PR.
### Documentation Page Link
https://pyhf.readthedocs.io/en/v0.6.3/_generated/pyhf.optimize.opt_minuit.minuit_optimizer.html#pyhf.optimize.opt_minuit.minuit_optimizer
### Code of Conduct
- [X] I agree to follow the Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/optimize/opt_minuit.py`
Content:
```
1 """Minuit Optimizer Class."""
2 from pyhf import exceptions
3 from pyhf.optimize.mixins import OptimizerMixin
4 import scipy
5 import iminuit
6
7
8 class minuit_optimizer(OptimizerMixin):
9 """
10 Optimizer that minimizes via :meth:`iminuit.Minuit.migrad`.
11 """
12
13 __slots__ = ['name', 'errordef', 'steps', 'strategy', 'tolerance']
14
15 def __init__(self, *args, **kwargs):
16 """
17 Create :class:`iminuit.Minuit` optimizer.
18
19 .. note::
20
21 ``errordef`` should be 1.0 for a least-squares cost function and 0.50
22 for negative log-likelihood function --- see `MINUIT: Function Minimization
23 and Error Analysis Reference Manual <https://cdsweb.cern.ch/record/2296388/>`_
24 Section 7.1: Function normalization and ERROR DEF.
25 This parameter is sometimes called ``UP`` in the ``MINUIT`` docs.
26
27
28 Args:
29 errordef (:obj:`float`): See minuit docs. Default is ``1.0``.
30 steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.
31 strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is ``None``.
32 tolerance (:obj:`float`): Tolerance for termination.
33 See specific optimizer for detailed meaning.
34 Default is ``0.1``.
35 """
36 self.name = 'minuit'
37 self.errordef = kwargs.pop('errordef', 1)
38 self.steps = kwargs.pop('steps', 1000)
39 self.strategy = kwargs.pop('strategy', None)
40 self.tolerance = kwargs.pop('tolerance', 0.1)
41 super().__init__(*args, **kwargs)
42
43 def _get_minimizer(
44 self,
45 objective_and_grad,
46 init_pars,
47 init_bounds,
48 fixed_vals=None,
49 do_grad=False,
50 par_names=None,
51 ):
52 fixed_vals = fixed_vals or []
53 # Minuit wants True/False for each parameter
54 fixed_bools = [False] * len(init_pars)
55 for index, val in fixed_vals:
56 fixed_bools[index] = True
57 init_pars[index] = val
58
59 # Minuit requires jac=callable
60 if do_grad:
61 wrapped_objective = lambda pars: objective_and_grad(pars)[0] # noqa: E731
62 jac = lambda pars: objective_and_grad(pars)[1] # noqa: E731
63 else:
64 wrapped_objective = objective_and_grad
65 jac = None
66
67 minuit = iminuit.Minuit(wrapped_objective, init_pars, grad=jac, name=par_names)
68 minuit.limits = init_bounds
69 minuit.fixed = fixed_bools
70 minuit.print_level = self.verbose
71 minuit.errordef = self.errordef
72 return minuit
73
74 def _minimize(
75 self,
76 minimizer,
77 func,
78 x0,
79 do_grad=False,
80 bounds=None,
81 fixed_vals=None,
82 options={},
83 ):
84 """
85 Same signature as :func:`scipy.optimize.minimize`.
86
87 Note: an additional `minuit` is injected into the fitresult to get the
88 underlying minimizer.
89
90 Minimizer Options:
91 * maxiter (:obj:`int`): Maximum number of iterations. Default is ``100000``.
92 * strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.
93 Default is to configure in response to ``do_grad``.
94 * tolerance (:obj:`float`): Tolerance for termination.
95 See specific optimizer for detailed meaning.
96 Default is ``0.1``.
97
98 Returns:
99 fitresult (scipy.optimize.OptimizeResult): the fit result
100 """
101 maxiter = options.pop('maxiter', self.maxiter)
102 # 0: Fast, user-provided gradient
103 # 1: Default, no user-provided gradient
104 strategy = options.pop(
105 'strategy', self.strategy if self.strategy is not None else not do_grad
106 )
107 tolerance = options.pop('tolerance', self.tolerance)
108 if options:
109 raise exceptions.Unsupported(
110 f"Unsupported options were passed in: {list(options)}."
111 )
112
113 minimizer.strategy = strategy
114 minimizer.tol = tolerance
115 minimizer.migrad(ncall=maxiter)
116 # Following lines below come from:
117 # https://github.com/scikit-hep/iminuit/blob/23bad7697e39d363f259ca8349684df939b1b2e6/src/iminuit/_minimize.py#L111-L130
118 message = "Optimization terminated successfully."
119 if not minimizer.valid:
120 message = "Optimization failed."
121 fmin = minimizer.fmin
122 if fmin.has_reached_call_limit:
123 message += " Call limit was reached."
124 if fmin.is_above_max_edm:
125 message += " Estimated distance to minimum too large."
126
127 hess_inv = None
128 corr = None
129 unc = None
130 if minimizer.valid:
131 # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.
132 minimizer.hesse()
133 hess_inv = minimizer.covariance
134 corr = hess_inv.correlation()
135 unc = minimizer.errors
136
137 return scipy.optimize.OptimizeResult(
138 x=minimizer.values,
139 unc=unc,
140 corr=corr,
141 success=minimizer.valid,
142 fun=minimizer.fval,
143 hess_inv=hess_inv,
144 message=message,
145 nfev=minimizer.nfcn,
146 njev=minimizer.ngrad,
147 minuit=minimizer,
148 )
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py
--- a/src/pyhf/optimize/opt_minuit.py
+++ b/src/pyhf/optimize/opt_minuit.py
@@ -28,7 +28,10 @@
Args:
errordef (:obj:`float`): See minuit docs. Default is ``1.0``.
steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.
- strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is ``None``.
+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.
+ Default is ``None``, which results in either
+ :attr:`iminuit.Minuit.strategy` ``0`` or ``1`` from the evaluation of
+ ``int(not pyhf.tensorlib.default_do_grad)``.
tolerance (:obj:`float`): Tolerance for termination.
See specific optimizer for detailed meaning.
Default is ``0.1``.
@@ -99,11 +102,14 @@
fitresult (scipy.optimize.OptimizeResult): the fit result
"""
maxiter = options.pop('maxiter', self.maxiter)
- # 0: Fast, user-provided gradient
- # 1: Default, no user-provided gradient
- strategy = options.pop(
- 'strategy', self.strategy if self.strategy is not None else not do_grad
- )
+ # do_grad value results in iminuit.Minuit.strategy of either:
+ # 0: Fast. Does not check a user-provided gradient.
+ # 1: Default. Checks user-provided gradient against numerical gradient.
+ strategy = options.pop("strategy", self.strategy)
+ # Guard against None from either self.strategy defaulting to None or
+ # passing strategy=None as options kwarg
+ if strategy is None:
+ strategy = 0 if do_grad else 1
tolerance = options.pop('tolerance', self.tolerance)
if options:
raise exceptions.Unsupported(
| {"golden_diff": "diff --git a/src/pyhf/optimize/opt_minuit.py b/src/pyhf/optimize/opt_minuit.py\n--- a/src/pyhf/optimize/opt_minuit.py\n+++ b/src/pyhf/optimize/opt_minuit.py\n@@ -28,7 +28,10 @@\n Args:\n errordef (:obj:`float`): See minuit docs. Default is ``1.0``.\n steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.\n- strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. Default is ``None``.\n+ strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.\n+ Default is ``None``, which results in either\n+ :attr:`iminuit.Minuit.strategy` ``0`` or ``1`` from the evaluation of\n+ ``int(not pyhf.tensorlib.default_do_grad)``.\n tolerance (:obj:`float`): Tolerance for termination.\n See specific optimizer for detailed meaning.\n Default is ``0.1``.\n@@ -99,11 +102,14 @@\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n- # 0: Fast, user-provided gradient\n- # 1: Default, no user-provided gradient\n- strategy = options.pop(\n- 'strategy', self.strategy if self.strategy is not None else not do_grad\n- )\n+ # do_grad value results in iminuit.Minuit.strategy of either:\n+ # 0: Fast. Does not check a user-provided gradient.\n+ # 1: Default. Checks user-provided gradient against numerical gradient.\n+ strategy = options.pop(\"strategy\", self.strategy)\n+ # Guard against None from either self.strategy defaulting to None or\n+ # passing strategy=None as options kwarg\n+ if strategy is None:\n+ strategy = 0 if do_grad else 1\n tolerance = options.pop('tolerance', self.tolerance)\n if options:\n raise exceptions.Unsupported(\n", "issue": "Documentation of default Minuit strategy choice\n### Summary\n\nThe documentation of `minuit_optimizer` lists `strategy` as a possible kwarg with default value of `None`, but it does not explain what that choice leads to. `pyhf` uses `0`/`1` depending on whether user-provided gradients are used (#1172 / #1183), and I believe it would be good to have that documented on the optimizer page. I'd be happy to submit a PR.\n\n### Documentation Page Link\n\nhttps://pyhf.readthedocs.io/en/v0.6.3/_generated/pyhf.optimize.opt_minuit.minuit_optimizer.html#pyhf.optimize.opt_minuit.minuit_optimizer\n\n### Code of Conduct\n\n- [X] I agree to follow the Code of Conduct\n", "before_files": [{"content": "\"\"\"Minuit Optimizer Class.\"\"\"\nfrom pyhf import exceptions\nfrom pyhf.optimize.mixins import OptimizerMixin\nimport scipy\nimport iminuit\n\n\nclass minuit_optimizer(OptimizerMixin):\n \"\"\"\n Optimizer that minimizes via :meth:`iminuit.Minuit.migrad`.\n \"\"\"\n\n __slots__ = ['name', 'errordef', 'steps', 'strategy', 'tolerance']\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create :class:`iminuit.Minuit` optimizer.\n\n .. note::\n\n ``errordef`` should be 1.0 for a least-squares cost function and 0.50\n for negative log-likelihood function --- see `MINUIT: Function Minimization\n and Error Analysis Reference Manual <https://cdsweb.cern.ch/record/2296388/>`_\n Section 7.1: Function normalization and ERROR DEF.\n This parameter is sometimes called ``UP`` in the ``MINUIT`` docs.\n\n\n Args:\n errordef (:obj:`float`): See minuit docs. Default is ``1.0``.\n steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.\n strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`. 
Default is ``None``.\n tolerance (:obj:`float`): Tolerance for termination.\n See specific optimizer for detailed meaning.\n Default is ``0.1``.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n self.strategy = kwargs.pop('strategy', None)\n self.tolerance = kwargs.pop('tolerance', 0.1)\n super().__init__(*args, **kwargs)\n\n def _get_minimizer(\n self,\n objective_and_grad,\n init_pars,\n init_bounds,\n fixed_vals=None,\n do_grad=False,\n par_names=None,\n ):\n fixed_vals = fixed_vals or []\n # Minuit wants True/False for each parameter\n fixed_bools = [False] * len(init_pars)\n for index, val in fixed_vals:\n fixed_bools[index] = True\n init_pars[index] = val\n\n # Minuit requires jac=callable\n if do_grad:\n wrapped_objective = lambda pars: objective_and_grad(pars)[0] # noqa: E731\n jac = lambda pars: objective_and_grad(pars)[1] # noqa: E731\n else:\n wrapped_objective = objective_and_grad\n jac = None\n\n minuit = iminuit.Minuit(wrapped_objective, init_pars, grad=jac, name=par_names)\n minuit.limits = init_bounds\n minuit.fixed = fixed_bools\n minuit.print_level = self.verbose\n minuit.errordef = self.errordef\n return minuit\n\n def _minimize(\n self,\n minimizer,\n func,\n x0,\n do_grad=False,\n bounds=None,\n fixed_vals=None,\n options={},\n ):\n \"\"\"\n Same signature as :func:`scipy.optimize.minimize`.\n\n Note: an additional `minuit` is injected into the fitresult to get the\n underlying minimizer.\n\n Minimizer Options:\n * maxiter (:obj:`int`): Maximum number of iterations. Default is ``100000``.\n * strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.\n Default is to configure in response to ``do_grad``.\n * tolerance (:obj:`float`): Tolerance for termination.\n See specific optimizer for detailed meaning.\n Default is ``0.1``.\n\n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n # 0: Fast, user-provided gradient\n # 1: Default, no user-provided gradient\n strategy = options.pop(\n 'strategy', self.strategy if self.strategy is not None else not do_grad\n )\n tolerance = options.pop('tolerance', self.tolerance)\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options)}.\"\n )\n\n minimizer.strategy = strategy\n minimizer.tol = tolerance\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/23bad7697e39d363f259ca8349684df939b1b2e6/src/iminuit/_minimize.py#L111-L130\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n fmin = minimizer.fmin\n if fmin.has_reached_call_limit:\n message += \" Call limit was reached.\"\n if fmin.is_above_max_edm:\n message += \" Estimated distance to minimum too large.\"\n\n hess_inv = None\n corr = None\n unc = None\n if minimizer.valid:\n # Extra call to hesse() after migrad() is always needed for good error estimates. 
If you pass a user-provided gradient to MINUIT, convergence is faster.\n minimizer.hesse()\n hess_inv = minimizer.covariance\n corr = hess_inv.correlation()\n unc = minimizer.errors\n\n return scipy.optimize.OptimizeResult(\n x=minimizer.values,\n unc=unc,\n corr=corr,\n success=minimizer.valid,\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n nfev=minimizer.nfcn,\n njev=minimizer.ngrad,\n minuit=minimizer,\n )\n", "path": "src/pyhf/optimize/opt_minuit.py"}], "after_files": [{"content": "\"\"\"Minuit Optimizer Class.\"\"\"\nfrom pyhf import exceptions\nfrom pyhf.optimize.mixins import OptimizerMixin\nimport scipy\nimport iminuit\n\n\nclass minuit_optimizer(OptimizerMixin):\n \"\"\"\n Optimizer that minimizes via :meth:`iminuit.Minuit.migrad`.\n \"\"\"\n\n __slots__ = ['name', 'errordef', 'steps', 'strategy', 'tolerance']\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Create :class:`iminuit.Minuit` optimizer.\n\n .. note::\n\n ``errordef`` should be 1.0 for a least-squares cost function and 0.50\n for negative log-likelihood function --- see `MINUIT: Function Minimization\n and Error Analysis Reference Manual <https://cdsweb.cern.ch/record/2296388/>`_\n Section 7.1: Function normalization and ERROR DEF.\n This parameter is sometimes called ``UP`` in the ``MINUIT`` docs.\n\n\n Args:\n errordef (:obj:`float`): See minuit docs. Default is ``1.0``.\n steps (:obj:`int`): Number of steps for the bounds. Default is ``1000``.\n strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.\n Default is ``None``, which results in either\n :attr:`iminuit.Minuit.strategy` ``0`` or ``1`` from the evaluation of\n ``int(not pyhf.tensorlib.default_do_grad)``.\n tolerance (:obj:`float`): Tolerance for termination.\n See specific optimizer for detailed meaning.\n Default is ``0.1``.\n \"\"\"\n self.name = 'minuit'\n self.errordef = kwargs.pop('errordef', 1)\n self.steps = kwargs.pop('steps', 1000)\n self.strategy = kwargs.pop('strategy', None)\n self.tolerance = kwargs.pop('tolerance', 0.1)\n super().__init__(*args, **kwargs)\n\n def _get_minimizer(\n self,\n objective_and_grad,\n init_pars,\n init_bounds,\n fixed_vals=None,\n do_grad=False,\n par_names=None,\n ):\n fixed_vals = fixed_vals or []\n # Minuit wants True/False for each parameter\n fixed_bools = [False] * len(init_pars)\n for index, val in fixed_vals:\n fixed_bools[index] = True\n init_pars[index] = val\n\n # Minuit requires jac=callable\n if do_grad:\n wrapped_objective = lambda pars: objective_and_grad(pars)[0] # noqa: E731\n jac = lambda pars: objective_and_grad(pars)[1] # noqa: E731\n else:\n wrapped_objective = objective_and_grad\n jac = None\n\n minuit = iminuit.Minuit(wrapped_objective, init_pars, grad=jac, name=par_names)\n minuit.limits = init_bounds\n minuit.fixed = fixed_bools\n minuit.print_level = self.verbose\n minuit.errordef = self.errordef\n return minuit\n\n def _minimize(\n self,\n minimizer,\n func,\n x0,\n do_grad=False,\n bounds=None,\n fixed_vals=None,\n options={},\n ):\n \"\"\"\n Same signature as :func:`scipy.optimize.minimize`.\n\n Note: an additional `minuit` is injected into the fitresult to get the\n underlying minimizer.\n\n Minimizer Options:\n * maxiter (:obj:`int`): Maximum number of iterations. 
Default is ``100000``.\n * strategy (:obj:`int`): See :attr:`iminuit.Minuit.strategy`.\n Default is to configure in response to ``do_grad``.\n * tolerance (:obj:`float`): Tolerance for termination.\n See specific optimizer for detailed meaning.\n Default is ``0.1``.\n\n Returns:\n fitresult (scipy.optimize.OptimizeResult): the fit result\n \"\"\"\n maxiter = options.pop('maxiter', self.maxiter)\n # do_grad value results in iminuit.Minuit.strategy of either:\n # 0: Fast. Does not check a user-provided gradient.\n # 1: Default. Checks user-provided gradient against numerical gradient.\n strategy = options.pop(\"strategy\", self.strategy)\n # Guard against None from either self.strategy defaulting to None or\n # passing strategy=None as options kwarg\n if strategy is None:\n strategy = 0 if do_grad else 1\n tolerance = options.pop('tolerance', self.tolerance)\n if options:\n raise exceptions.Unsupported(\n f\"Unsupported options were passed in: {list(options)}.\"\n )\n\n minimizer.strategy = strategy\n minimizer.tol = tolerance\n minimizer.migrad(ncall=maxiter)\n # Following lines below come from:\n # https://github.com/scikit-hep/iminuit/blob/23bad7697e39d363f259ca8349684df939b1b2e6/src/iminuit/_minimize.py#L111-L130\n message = \"Optimization terminated successfully.\"\n if not minimizer.valid:\n message = \"Optimization failed.\"\n fmin = minimizer.fmin\n if fmin.has_reached_call_limit:\n message += \" Call limit was reached.\"\n if fmin.is_above_max_edm:\n message += \" Estimated distance to minimum too large.\"\n\n hess_inv = None\n corr = None\n unc = None\n if minimizer.valid:\n # Extra call to hesse() after migrad() is always needed for good error estimates. If you pass a user-provided gradient to MINUIT, convergence is faster.\n minimizer.hesse()\n hess_inv = minimizer.covariance\n corr = hess_inv.correlation()\n unc = minimizer.errors\n\n return scipy.optimize.OptimizeResult(\n x=minimizer.values,\n unc=unc,\n corr=corr,\n success=minimizer.valid,\n fun=minimizer.fval,\n hess_inv=hess_inv,\n message=message,\n nfev=minimizer.nfcn,\n njev=minimizer.ngrad,\n minuit=minimizer,\n )\n", "path": "src/pyhf/optimize/opt_minuit.py"}]} | 2,069 | 471 |
gh_patches_debug_15378 | rasdani/github-patches | git_diff | mkdocs__mkdocs-244 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Default theme files are added when a custom theme is used and no "parent" theme is specified
I'm using `theme_dir` to specify a custom theme directory with the latest versions of Bootstrap and Font Awesome, and I'm not specifying `theme` because I don't want to inherit files from a parent theme. However, since `theme` defaults to 'mkdocs', my site directory ends up with two different versions of these libraries in addition to other files I'm not using like prettify.
I tried explicitly setting `theme` to null, but that didn't work. As a hack, setting `theme` to a custom name works for `mkdocs build`, but not `mkdocs serve`.
This wasn't an issue with v0.9, but I've noticed it since upgrading to v0.11.1.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/config.py`
Content:
```
1 # coding: utf-8
2
3 from mkdocs import utils
4 from mkdocs.compat import urlparse
5 from mkdocs.exceptions import ConfigurationError
6
7 import os
8 import yaml
9
10 DEFAULT_CONFIG = {
11 'site_name': None,
12 'pages': None,
13
14 'site_url': None,
15 'site_description': None,
16 'site_author': None,
17 'site_favicon': None,
18
19 'theme': 'mkdocs',
20 'docs_dir': 'docs',
21 'site_dir': 'site',
22 'theme_dir': None,
23
24 'copyright': None,
25 'google_analytics': None,
26
27 # The address on which to serve the livereloading docs server.
28 'dev_addr': '127.0.0.1:8000',
29
30 # If `True`, use `<page_name>/index.hmtl` style files with hyperlinks to the directory.
31 # If `False`, use `<page_name>.html style file with hyperlinks to the file.
32 # True generates nicer URLs, but False is useful if browsing the output on a filesystem.
33 'use_directory_urls': True,
34
35 # Specify a link to the project source repo to be included
36 # in the documentation pages.
37 'repo_url': None,
38
39 # A name to use for the link to the project source repo.
40 # Default: If repo_url is unset then None, otherwise
41 # "GitHub" or "Bitbucket" for known url or Hostname for unknown urls.
42 'repo_name': None,
43
44 # Specify which css or javascript files from the docs
45 # directionary should be additionally included in the site.
46 # Default: List of all .css and .js files in the docs dir.
47 'extra_css': None,
48 'extra_javascript': None,
49
50 # Determine if the site should include the nav and next/prev elements.
51 # Default: True if the site has more than one page, False otherwise.
52 'include_nav': None,
53 'include_next_prev': None,
54
55 # PyMarkdown extension names.
56 'markdown_extensions': (),
57
58 # Determine if the site should generate a json search index and include
59 # search elements in the theme. - TODO
60 'include_search': False,
61
62 # Determine if the site should include a 404.html page.
63 # TODO: Implment this. Make this None, have it True if a 404.html
64 # template exists in the theme or docs dir.
65 'include_404': False,
66
67 # Determine if the site should include a sitemap.xml page.
68 # TODO: Implement this. Make this None, have it True if a sitemap.xml
69 # template exists in the theme or docs dir.
70 'include_sitemap': False,
71 }
72
73
74 def load_config(filename='mkdocs.yml', options=None):
75 options = options or {}
76 if 'config' in options:
77 filename = options['config']
78 if not os.path.exists(filename):
79 raise ConfigurationError("Config file '%s' does not exist." % filename)
80 with open(filename, 'r') as fp:
81 user_config = yaml.load(fp)
82 user_config.update(options)
83 return validate_config(user_config)
84
85
86 def validate_config(user_config):
87 config = DEFAULT_CONFIG.copy()
88 config.update(user_config)
89
90 if not config['site_name']:
91 raise ConfigurationError("Config must contain 'site_name' setting.")
92
93 # If not specified, then the 'pages' config simply includes all
94 # markdown files in the docs dir, without generating any header items
95 # for them.
96 pages = []
97 extra_css = []
98 extra_javascript = []
99 for (dirpath, dirnames, filenames) in os.walk(config['docs_dir']):
100 for filename in sorted(filenames):
101 fullpath = os.path.join(dirpath, filename)
102 relpath = os.path.relpath(fullpath, config['docs_dir'])
103
104 if utils.is_markdown_file(filename):
105 # index pages should always be the first listed page.
106 if os.path.splitext(relpath)[0] == 'index':
107 pages.insert(0, relpath)
108 else:
109 pages.append(relpath)
110 elif utils.is_css_file(filename):
111 extra_css.append(relpath)
112 elif utils.is_javascript_file(filename):
113 extra_javascript.append(relpath)
114
115 if config['pages'] is None:
116 config['pages'] = pages
117
118 if config['extra_css'] is None:
119 config['extra_css'] = extra_css
120
121 if config['extra_javascript'] is None:
122 config['extra_javascript'] = extra_javascript
123
124 package_dir = os.path.dirname(__file__)
125 theme_dir = [os.path.join(package_dir, 'themes', config['theme'])]
126
127 if config['theme_dir'] is not None:
128 theme_dir.insert(0, config['theme_dir'])
129
130 config['theme_dir'] = theme_dir
131
132 if config['repo_url'] is not None and config['repo_name'] is None:
133 repo_host = urlparse(config['repo_url']).netloc.lower()
134 if repo_host == 'github.com':
135 config['repo_name'] = 'GitHub'
136 elif repo_host == 'bitbucket.com':
137 config['repo_name'] = 'Bitbucket'
138 else:
139 config['repo_name'] = repo_host.split('.')[0].title()
140
141 if config['include_next_prev'] is None:
142 config['include_next_prev'] = len(config['pages']) > 1
143
144 if config['include_nav'] is None:
145 config['include_nav'] = len(config['pages']) > 1
146
147 # To Do:
148
149 # The docs dir must exist.
150 # The theme dir must exist.
151 # Ensure 'theme' is one of 'mkdocs', 'readthedocs', 'custom'
152 # A homepage 'index' must exist.
153 # The theme 'base.html' file must exist.
154 # Cannot set repo_name without setting repo_url.
155 # Cannot set 'include_next_prev: true' when only one page exists.
156 # Cannot set 'include_nav: true' when only one page exists.
157 # Error if any config keys provided that are not in the DEFAULT_CONFIG.
158
159 return config
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/config.py b/mkdocs/config.py
--- a/mkdocs/config.py
+++ b/mkdocs/config.py
@@ -85,6 +85,9 @@
def validate_config(user_config):
config = DEFAULT_CONFIG.copy()
+
+ theme_in_config = 'theme' in user_config
+
config.update(user_config)
if not config['site_name']:
@@ -125,6 +128,10 @@
theme_dir = [os.path.join(package_dir, 'themes', config['theme'])]
if config['theme_dir'] is not None:
+ # If the user has given us a custom theme but not a
+ # builtin theme name then we don't want to merge them.
+ if not theme_in_config:
+ theme_dir = []
theme_dir.insert(0, config['theme_dir'])
config['theme_dir'] = theme_dir
| {"golden_diff": "diff --git a/mkdocs/config.py b/mkdocs/config.py\n--- a/mkdocs/config.py\n+++ b/mkdocs/config.py\n@@ -85,6 +85,9 @@\n \n def validate_config(user_config):\n config = DEFAULT_CONFIG.copy()\n+\n+ theme_in_config = 'theme' in user_config\n+\n config.update(user_config)\n \n if not config['site_name']:\n@@ -125,6 +128,10 @@\n theme_dir = [os.path.join(package_dir, 'themes', config['theme'])]\n \n if config['theme_dir'] is not None:\n+ # If the user has given us a custom theme but not a\n+ # builtin theme name then we don't want to merge them.\n+ if not theme_in_config:\n+ theme_dir = []\n theme_dir.insert(0, config['theme_dir'])\n \n config['theme_dir'] = theme_dir\n", "issue": "Default theme files are added when a custom theme is used and no \"parent\" theme is specified\nI'm using `theme_dir` to specify a custom theme directory with the latest versions of Bootstrap and Font Awesome, and I'm not specifying `theme` because I don't want to inherit files from a parent theme. However, since `theme` defaults to 'mkdocs', my site directory ends up with two different versions of these libraries in addition to other files I'm not using like prettify.\n\nI tried explicitly setting `theme` to null, but that didn't work. As a hack, setting `theme` to a custom name works for `mkdocs build`, but not `mkdocs serve`.\n\nThis wasn't an issue with v0.9, but I've noticed it since upgrading to v0.11.1.\n\n", "before_files": [{"content": "# coding: utf-8\n\nfrom mkdocs import utils\nfrom mkdocs.compat import urlparse\nfrom mkdocs.exceptions import ConfigurationError\n\nimport os\nimport yaml\n\nDEFAULT_CONFIG = {\n 'site_name': None,\n 'pages': None,\n\n 'site_url': None,\n 'site_description': None,\n 'site_author': None,\n 'site_favicon': None,\n\n 'theme': 'mkdocs',\n 'docs_dir': 'docs',\n 'site_dir': 'site',\n 'theme_dir': None,\n\n 'copyright': None,\n 'google_analytics': None,\n\n # The address on which to serve the livereloading docs server.\n 'dev_addr': '127.0.0.1:8000',\n\n # If `True`, use `<page_name>/index.hmtl` style files with hyperlinks to the directory.\n # If `False`, use `<page_name>.html style file with hyperlinks to the file.\n # True generates nicer URLs, but False is useful if browsing the output on a filesystem.\n 'use_directory_urls': True,\n\n # Specify a link to the project source repo to be included\n # in the documentation pages.\n 'repo_url': None,\n\n # A name to use for the link to the project source repo.\n # Default: If repo_url is unset then None, otherwise\n # \"GitHub\" or \"Bitbucket\" for known url or Hostname for unknown urls.\n 'repo_name': None,\n\n # Specify which css or javascript files from the docs\n # directionary should be additionally included in the site.\n # Default: List of all .css and .js files in the docs dir.\n 'extra_css': None,\n 'extra_javascript': None,\n\n # Determine if the site should include the nav and next/prev elements.\n # Default: True if the site has more than one page, False otherwise.\n 'include_nav': None,\n 'include_next_prev': None,\n\n # PyMarkdown extension names.\n 'markdown_extensions': (),\n\n # Determine if the site should generate a json search index and include\n # search elements in the theme. - TODO\n 'include_search': False,\n\n # Determine if the site should include a 404.html page.\n # TODO: Implment this. Make this None, have it True if a 404.html\n # template exists in the theme or docs dir.\n 'include_404': False,\n\n # Determine if the site should include a sitemap.xml page.\n # TODO: Implement this. 
Make this None, have it True if a sitemap.xml\n # template exists in the theme or docs dir.\n 'include_sitemap': False,\n}\n\n\ndef load_config(filename='mkdocs.yml', options=None):\n options = options or {}\n if 'config' in options:\n filename = options['config']\n if not os.path.exists(filename):\n raise ConfigurationError(\"Config file '%s' does not exist.\" % filename)\n with open(filename, 'r') as fp:\n user_config = yaml.load(fp)\n user_config.update(options)\n return validate_config(user_config)\n\n\ndef validate_config(user_config):\n config = DEFAULT_CONFIG.copy()\n config.update(user_config)\n\n if not config['site_name']:\n raise ConfigurationError(\"Config must contain 'site_name' setting.\")\n\n # If not specified, then the 'pages' config simply includes all\n # markdown files in the docs dir, without generating any header items\n # for them.\n pages = []\n extra_css = []\n extra_javascript = []\n for (dirpath, dirnames, filenames) in os.walk(config['docs_dir']):\n for filename in sorted(filenames):\n fullpath = os.path.join(dirpath, filename)\n relpath = os.path.relpath(fullpath, config['docs_dir'])\n\n if utils.is_markdown_file(filename):\n # index pages should always be the first listed page.\n if os.path.splitext(relpath)[0] == 'index':\n pages.insert(0, relpath)\n else:\n pages.append(relpath)\n elif utils.is_css_file(filename):\n extra_css.append(relpath)\n elif utils.is_javascript_file(filename):\n extra_javascript.append(relpath)\n\n if config['pages'] is None:\n config['pages'] = pages\n\n if config['extra_css'] is None:\n config['extra_css'] = extra_css\n\n if config['extra_javascript'] is None:\n config['extra_javascript'] = extra_javascript\n\n package_dir = os.path.dirname(__file__)\n theme_dir = [os.path.join(package_dir, 'themes', config['theme'])]\n\n if config['theme_dir'] is not None:\n theme_dir.insert(0, config['theme_dir'])\n\n config['theme_dir'] = theme_dir\n\n if config['repo_url'] is not None and config['repo_name'] is None:\n repo_host = urlparse(config['repo_url']).netloc.lower()\n if repo_host == 'github.com':\n config['repo_name'] = 'GitHub'\n elif repo_host == 'bitbucket.com':\n config['repo_name'] = 'Bitbucket'\n else:\n config['repo_name'] = repo_host.split('.')[0].title()\n\n if config['include_next_prev'] is None:\n config['include_next_prev'] = len(config['pages']) > 1\n\n if config['include_nav'] is None:\n config['include_nav'] = len(config['pages']) > 1\n\n # To Do:\n\n # The docs dir must exist.\n # The theme dir must exist.\n # Ensure 'theme' is one of 'mkdocs', 'readthedocs', 'custom'\n # A homepage 'index' must exist.\n # The theme 'base.html' file must exist.\n # Cannot set repo_name without setting repo_url.\n # Cannot set 'include_next_prev: true' when only one page exists.\n # Cannot set 'include_nav: true' when only one page exists.\n # Error if any config keys provided that are not in the DEFAULT_CONFIG.\n\n return config\n", "path": "mkdocs/config.py"}], "after_files": [{"content": "# coding: utf-8\n\nfrom mkdocs import utils\nfrom mkdocs.compat import urlparse\nfrom mkdocs.exceptions import ConfigurationError\n\nimport os\nimport yaml\n\nDEFAULT_CONFIG = {\n 'site_name': None,\n 'pages': None,\n\n 'site_url': None,\n 'site_description': None,\n 'site_author': None,\n 'site_favicon': None,\n\n 'theme': 'mkdocs',\n 'docs_dir': 'docs',\n 'site_dir': 'site',\n 'theme_dir': None,\n\n 'copyright': None,\n 'google_analytics': None,\n\n # The address on which to serve the livereloading docs server.\n 'dev_addr': '127.0.0.1:8000',\n\n # 
If `True`, use `<page_name>/index.hmtl` style files with hyperlinks to the directory.\n # If `False`, use `<page_name>.html style file with hyperlinks to the file.\n # True generates nicer URLs, but False is useful if browsing the output on a filesystem.\n 'use_directory_urls': True,\n\n # Specify a link to the project source repo to be included\n # in the documentation pages.\n 'repo_url': None,\n\n # A name to use for the link to the project source repo.\n # Default: If repo_url is unset then None, otherwise\n # \"GitHub\" or \"Bitbucket\" for known url or Hostname for unknown urls.\n 'repo_name': None,\n\n # Specify which css or javascript files from the docs\n # directionary should be additionally included in the site.\n # Default: List of all .css and .js files in the docs dir.\n 'extra_css': None,\n 'extra_javascript': None,\n\n # Determine if the site should include the nav and next/prev elements.\n # Default: True if the site has more than one page, False otherwise.\n 'include_nav': None,\n 'include_next_prev': None,\n\n # PyMarkdown extension names.\n 'markdown_extensions': (),\n\n # Determine if the site should generate a json search index and include\n # search elements in the theme. - TODO\n 'include_search': False,\n\n # Determine if the site should include a 404.html page.\n # TODO: Implment this. Make this None, have it True if a 404.html\n # template exists in the theme or docs dir.\n 'include_404': False,\n\n # Determine if the site should include a sitemap.xml page.\n # TODO: Implement this. Make this None, have it True if a sitemap.xml\n # template exists in the theme or docs dir.\n 'include_sitemap': False,\n}\n\n\ndef load_config(filename='mkdocs.yml', options=None):\n options = options or {}\n if 'config' in options:\n filename = options['config']\n if not os.path.exists(filename):\n raise ConfigurationError(\"Config file '%s' does not exist.\" % filename)\n with open(filename, 'r') as fp:\n user_config = yaml.load(fp)\n user_config.update(options)\n return validate_config(user_config)\n\n\ndef validate_config(user_config):\n config = DEFAULT_CONFIG.copy()\n\n theme_in_config = 'theme' in user_config\n\n config.update(user_config)\n\n if not config['site_name']:\n raise ConfigurationError(\"Config must contain 'site_name' setting.\")\n\n # If not specified, then the 'pages' config simply includes all\n # markdown files in the docs dir, without generating any header items\n # for them.\n pages = []\n extra_css = []\n extra_javascript = []\n for (dirpath, dirnames, filenames) in os.walk(config['docs_dir']):\n for filename in sorted(filenames):\n fullpath = os.path.join(dirpath, filename)\n relpath = os.path.relpath(fullpath, config['docs_dir'])\n\n if utils.is_markdown_file(filename):\n # index pages should always be the first listed page.\n if os.path.splitext(relpath)[0] == 'index':\n pages.insert(0, relpath)\n else:\n pages.append(relpath)\n elif utils.is_css_file(filename):\n extra_css.append(relpath)\n elif utils.is_javascript_file(filename):\n extra_javascript.append(relpath)\n\n if config['pages'] is None:\n config['pages'] = pages\n\n if config['extra_css'] is None:\n config['extra_css'] = extra_css\n\n if config['extra_javascript'] is None:\n config['extra_javascript'] = extra_javascript\n\n package_dir = os.path.dirname(__file__)\n theme_dir = [os.path.join(package_dir, 'themes', config['theme'])]\n\n if config['theme_dir'] is not None:\n # If the user has given us a custom theme but not a\n # builtin theme name then we don't want to merge them.\n if not 
theme_in_config:\n theme_dir = []\n theme_dir.insert(0, config['theme_dir'])\n\n config['theme_dir'] = theme_dir\n\n if config['repo_url'] is not None and config['repo_name'] is None:\n repo_host = urlparse(config['repo_url']).netloc.lower()\n if repo_host == 'github.com':\n config['repo_name'] = 'GitHub'\n elif repo_host == 'bitbucket.com':\n config['repo_name'] = 'Bitbucket'\n else:\n config['repo_name'] = repo_host.split('.')[0].title()\n\n if config['include_next_prev'] is None:\n config['include_next_prev'] = len(config['pages']) > 1\n\n if config['include_nav'] is None:\n config['include_nav'] = len(config['pages']) > 1\n\n # To Do:\n\n # The docs dir must exist.\n # The theme dir must exist.\n # Ensure 'theme' is one of 'mkdocs', 'readthedocs', 'custom'\n # A homepage 'index' must exist.\n # The theme 'base.html' file must exist.\n # Cannot set repo_name without setting repo_url.\n # Cannot set 'include_next_prev: true' when only one page exists.\n # Cannot set 'include_nav: true' when only one page exists.\n # Error if any config keys provided that are not in the DEFAULT_CONFIG.\n\n return config\n", "path": "mkdocs/config.py"}]} | 2,138 | 202 |
gh_patches_debug_16844 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1227 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warning Check on Unused Parameter hides Error Check about Missing Parameter Type
*cfn-lint version: cfn-lint 0.25.3*
Parameters defined in a template, but not directly used, are not validated for missing attributes like `Type`.
For various reasons, we want to include parameters in our templates that are not used by any resources in those templates, and we therefore disable `W2001`. When this happens, the following template will not fail cfn-lint. If I uncomment the `Metadata` section, I will finally see the `E1012` failure. I should not have to resolve a Warning in order to unmask an Error.
```yaml
Parameters:
Foo:
Description: "Foo?"
Conditions:
AlwaysFalse: !Equals [ true, false ]
Resources:
# Metadata:
# Foo: !Ref Foo
NullResource:
Type: Custom::NullResource
Condition: AlwaysFalse
```
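To make the request concrete, the missing validation can be sketched as plain Python over a parameters mapping. This is a stand-alone illustration only — the constant and function names below are made up for the sketch and are not cfn-lint's internal API:

```python
# Stand-alone sketch of the check the report asks for; names are illustrative, not cfn-lint API.
REQUIRED_KEYS = ["Type"]

def missing_required_properties(parameters):
    """Return (parameter_name, missing_key) pairs for parameters lacking required keys."""
    problems = []
    for name, props in parameters.items():
        for key in REQUIRED_KEYS:
            if key not in props:
                problems.append((name, key))
    return problems

# The template above: "Foo" declares only a Description, no Type.
print(missing_required_properties({"Foo": {"Description": "Foo?"}}))
# -> [('Foo', 'Type')]
```

Whether or not the parameter is referenced anywhere, this error-level check should still run.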
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/parameters/Configuration.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 from cfnlint.rules import CloudFormationLintRule
6 from cfnlint.rules import RuleMatch
7
8
9 class Configuration(CloudFormationLintRule):
10 """Check if Parameters are configured correctly"""
11 id = 'E2001'
12 shortdesc = 'Parameters have appropriate properties'
13 description = 'Making sure the parameters are properly configured'
14 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'
15 tags = ['parameters']
16
17 valid_keys = [
18 'AllowedPattern',
19 'AllowedValues',
20 'ConstraintDescription',
21 'Default',
22 'Description',
23 'MaxLength',
24 'MaxValue',
25 'MinLength',
26 'MinValue',
27 'NoEcho',
28 'Type',
29 ]
30
31 def match(self, cfn):
32 """Check CloudFormation Parameters"""
33
34 matches = []
35
36 for paramname, paramvalue in cfn.get_parameters().items():
37 for propname, _ in paramvalue.items():
38 if propname not in self.valid_keys:
39 message = 'Parameter {0} has invalid property {1}'
40 matches.append(RuleMatch(
41 ['Parameters', paramname, propname],
42 message.format(paramname, propname)
43 ))
44
45 return matches
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/parameters/Configuration.py b/src/cfnlint/rules/parameters/Configuration.py
--- a/src/cfnlint/rules/parameters/Configuration.py
+++ b/src/cfnlint/rules/parameters/Configuration.py
@@ -28,6 +28,10 @@
'Type',
]
+ required_keys = [
+ 'Type'
+ ]
+
def match(self, cfn):
"""Check CloudFormation Parameters"""
@@ -41,5 +45,12 @@
['Parameters', paramname, propname],
message.format(paramname, propname)
))
+ for reqname in self.required_keys:
+ if reqname not in paramvalue.keys():
+ message = 'Parameter {0} is missing required property {1}'
+ matches.append(RuleMatch(
+ ['Parameters', paramname],
+ message.format(paramname, reqname)
+ ))
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/parameters/Configuration.py b/src/cfnlint/rules/parameters/Configuration.py\n--- a/src/cfnlint/rules/parameters/Configuration.py\n+++ b/src/cfnlint/rules/parameters/Configuration.py\n@@ -28,6 +28,10 @@\n 'Type',\n ]\n \n+ required_keys = [\n+ 'Type'\n+ ]\n+\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n \n@@ -41,5 +45,12 @@\n ['Parameters', paramname, propname],\n message.format(paramname, propname)\n ))\n+ for reqname in self.required_keys:\n+ if reqname not in paramvalue.keys():\n+ message = 'Parameter {0} is missing required property {1}'\n+ matches.append(RuleMatch(\n+ ['Parameters', paramname],\n+ message.format(paramname, reqname)\n+ ))\n \n return matches\n", "issue": "Warning Check on Unused Parameter hides Error Check about Missing Parameter Type\n*cfn-lint version: cfn-lint 0.25.3*\r\n\r\nParameters defined in a template, but not directly used, are not validated for missing attributes like `Type`.\r\n\r\nFor various reasons, we want to include parameters in our templates that are not used by resources in the templates and therefore disable `W2001` When this happens, the following template will not fail cfn-lint. If I uncomment the `Metadata` section, I will finally see the `E1012` failure. I should not have to resolve a Warning in order to unmask an Error.\r\n\r\n```yaml\r\nParameters:\r\n Foo:\r\n Description: \"Foo?\"\r\nConditions:\r\n AlwaysFalse: !Equals [ true, false ]\r\nResources:\r\n # Metadata:\r\n # Foo: !Ref Foo\r\n NullResource:\r\n Type: Custom::NullResource\r\n Condition: AlwaysFalse\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Parameters are configured correctly\"\"\"\n id = 'E2001'\n shortdesc = 'Parameters have appropriate properties'\n description = 'Making sure the parameters are properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'\n tags = ['parameters']\n\n valid_keys = [\n 'AllowedPattern',\n 'AllowedValues',\n 'ConstraintDescription',\n 'Default',\n 'Description',\n 'MaxLength',\n 'MaxValue',\n 'MinLength',\n 'MinValue',\n 'NoEcho',\n 'Type',\n ]\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n for paramname, paramvalue in cfn.get_parameters().items():\n for propname, _ in paramvalue.items():\n if propname not in self.valid_keys:\n message = 'Parameter {0} has invalid property {1}'\n matches.append(RuleMatch(\n ['Parameters', paramname, propname],\n message.format(paramname, propname)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/parameters/Configuration.py"}], "after_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check if Parameters are configured correctly\"\"\"\n id = 'E2001'\n shortdesc = 'Parameters have appropriate properties'\n description = 'Making sure the parameters are properly configured'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html'\n tags = ['parameters']\n\n valid_keys = [\n 'AllowedPattern',\n 'AllowedValues',\n 'ConstraintDescription',\n 'Default',\n 'Description',\n 'MaxLength',\n 'MaxValue',\n 'MinLength',\n 'MinValue',\n 'NoEcho',\n 'Type',\n ]\n\n required_keys = [\n 'Type'\n ]\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Parameters\"\"\"\n\n matches = []\n\n for paramname, paramvalue in cfn.get_parameters().items():\n for propname, _ in paramvalue.items():\n if propname not in self.valid_keys:\n message = 'Parameter {0} has invalid property {1}'\n matches.append(RuleMatch(\n ['Parameters', paramname, propname],\n message.format(paramname, propname)\n ))\n for reqname in self.required_keys:\n if reqname not in paramvalue.keys():\n message = 'Parameter {0} is missing required property {1}'\n matches.append(RuleMatch(\n ['Parameters', paramname],\n message.format(paramname, reqname)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/parameters/Configuration.py"}]} | 845 | 208 |
gh_patches_debug_2759 | rasdani/github-patches | git_diff | getnikola__nikola-3437 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The post_list plugin prevents 'else' functionality in templates
<!--
Before creating an issue:
* make sure you are using an up-to-date version of Nikola
* search for existing issues that might be related
Make sure to:
* provide information about your environment (below)
* include all the output you get, and any other information related to your problem
Nikola v7.6.4, as provided by Ubuntu, is NOT SUPPORTED.
If you are using this version, you should upgrade: https://getnikola.com/getting-started.html
-->
### Environment
**Python Version:**
3.7.8
**Nikola Version:**
8.1.1
**Operating System:**
macOS Catalina (10.15.5) / Ubuntu 19.10
### Description:
In the default template for the `post-list` plugin, namely `post_list_directive.tmpl`, we have:
```jinja
{% if posts %}
<ul class="post-list">
...
```
This suggests that there is some possibility that the template will be called with no posts.
While in `list_post.tmpl`, which you can also use with `post-list`, we have this:
```python
{% if posts %}
<ul class="postlist">
{% for post in posts %}
<li><time class="listdate" datetime="{{ post.formatted_date('webiso') }}" title="{{ post.formatted_date(date_format)|e }}">{{ post.formatted_date(date_format)|e }}</time> <a href="{{ post.permalink() }}" class="listtitle">{{ post.title()|e }}</a></li>
{% endfor %}
</ul>
{% else %}
<p>{{ messages("No posts found.") }}</p>
{% endif %}
```
This template is obviously expected to handle the situation when there are no posts.
However, when the plugin returns no posts, the `else` block is not executed. In fact, it appears that the template is not called at all when no posts are returned.
This is because of these lines in `post_list.py`, at around lines 221-222:
```python
if not posts:
return '', []
```
It seems that because these empty values are returned early, processing never reaches the template. Removing those lines fixes the problem and allows the template's `else` clause to work.
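To make the control-flow point concrete, here is a minimal stand-alone sketch — not Nikola's actual code, with names invented for the illustration — contrasting the early return with letting the empty list reach the template:

```python
# Minimal illustration of why the early return hides the template's {% else %} branch.
def template(posts):
    # Stands in for list_post.tmpl: renders a list, or a "no posts" message.
    return "<ul>...</ul>" if posts else "<p>No posts found.</p>"

def handler_with_early_return(posts):
    if not posts:
        return "", []            # the template never runs, so its else branch is unreachable
    return template(posts), ["list_post.tmpl"]

def handler_fixed(posts):
    return template(posts), ["list_post.tmpl"]   # the empty list reaches the template

print(handler_with_early_return([]))  # ('', [])
print(handler_fixed([]))              # ('<p>No posts found.</p>', ['list_post.tmpl'])
```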
I can't see that this change breaks anything else, so I'll submit a pull request for it, unless someone has an objection.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/shortcode/post_list.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2013-2020 Udo Spallek, Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 """Post list shortcode."""
28
29
30 import operator
31 import os
32 import uuid
33
34 import natsort
35
36 from nikola import utils
37 from nikola.packages.datecond import date_in_range
38 from nikola.plugin_categories import ShortcodePlugin
39
40
41 class PostListShortcode(ShortcodePlugin):
42 """Provide a shortcode to create a list of posts.
43
44 Post List
45 =========
46 :Directive Arguments: None.
47 :Directive Options: lang, start, stop, reverse, sort, date, tags, categories, sections, slugs, post_type, template, id
48 :Directive Content: None.
49
50 The posts appearing in the list can be filtered by options.
51 *List slicing* is provided with the *start*, *stop* and *reverse* options.
52
53 The following not required options are recognized:
54
55 ``start`` : integer
56 The index of the first post to show.
57 A negative value like ``-3`` will show the *last* three posts in the
58 post-list.
59 Defaults to None.
60
61 ``stop`` : integer
62 The index of the last post to show.
63 A value negative value like ``-1`` will show every post, but not the
64 *last* in the post-list.
65 Defaults to None.
66
67 ``reverse`` : flag
68 Reverse the order of the post-list.
69 Defaults is to not reverse the order of posts.
70
71 ``sort`` : string
72 Sort post list by one of each post's attributes, usually ``title`` or a
73 custom ``priority``. Defaults to None (chronological sorting).
74
75 ``date`` : string
76 Show posts that match date range specified by this option. Format:
77
78 * comma-separated clauses (AND)
79 * clause: attribute comparison_operator value (spaces optional)
80 * attribute: year, month, day, hour, month, second, weekday, isoweekday; or empty for full datetime
81 * comparison_operator: == != <= >= < >
82 * value: integer, 'now', 'today', or dateutil-compatible date input
83
84 ``tags`` : string [, string...]
85 Filter posts to show only posts having at least one of the ``tags``.
86 Defaults to None.
87
88 ``require_all_tags`` : flag
89 Change tag filter behaviour to show only posts that have all specified ``tags``.
90 Defaults to False.
91
92 ``categories`` : string [, string...]
93 Filter posts to show only posts having one of the ``categories``.
94 Defaults to None.
95
96 ``sections`` : string [, string...]
97 Filter posts to show only posts having one of the ``sections``.
98 Defaults to None.
99
100 ``slugs`` : string [, string...]
101 Filter posts to show only posts having at least one of the ``slugs``.
102 Defaults to None.
103
104 ``post_type`` (or ``type``) : string
105 Show only ``posts``, ``pages`` or ``all``.
106 Replaces ``all``. Defaults to ``posts``.
107
108 ``lang`` : string
109 The language of post *titles* and *links*.
110 Defaults to default language.
111
112 ``template`` : string
113 The name of an alternative template to render the post-list.
114 Defaults to ``post_list_directive.tmpl``
115
116 ``id`` : string
117 A manual id for the post list.
118 Defaults to a random name composed by 'post_list_' + uuid.uuid4().hex.
119 """
120
121 name = "post_list"
122
123 def set_site(self, site):
124 """Set the site."""
125 super().set_site(site)
126 site.register_shortcode('post-list', self.handler)
127
128 def handler(self, start=None, stop=None, reverse=False, tags=None, require_all_tags=False, categories=None,
129 sections=None, slugs=None, post_type='post', type=False,
130 lang=None, template='post_list_directive.tmpl', sort=None,
131 id=None, data=None, state=None, site=None, date=None, filename=None, post=None):
132 """Generate HTML for post-list."""
133 if lang is None:
134 lang = utils.LocaleBorg().current_lang
135 if site.invariant: # for testing purposes
136 post_list_id = id or 'post_list_' + 'fixedvaluethatisnotauuid'
137 else:
138 post_list_id = id or 'post_list_' + uuid.uuid4().hex
139
140 # Get post from filename if available
141 if filename:
142 self_post = site.post_per_input_file.get(filename)
143 else:
144 self_post = None
145
146 if self_post:
147 self_post.register_depfile("####MAGIC####TIMELINE", lang=lang)
148
149 # If we get strings for start/stop, make them integers
150 if start is not None:
151 start = int(start)
152 if stop is not None:
153 stop = int(stop)
154
155 # Parse tags/categories/sections/slugs (input is strings)
156 categories = [c.strip().lower() for c in categories.split(',')] if categories else []
157 sections = [s.strip().lower() for s in sections.split(',')] if sections else []
158 slugs = [s.strip() for s in slugs.split(',')] if slugs else []
159
160 filtered_timeline = []
161 posts = []
162 step = None if reverse is False else -1
163
164 if type is not False:
165 post_type = type
166
167 if post_type == 'page' or post_type == 'pages':
168 timeline = [p for p in site.timeline if not p.use_in_feeds]
169 elif post_type == 'all':
170 timeline = [p for p in site.timeline]
171 else: # post
172 timeline = [p for p in site.timeline if p.use_in_feeds]
173
174 # self_post should be removed from timeline because this is redundant
175 timeline = [p for p in timeline if p.source_path != filename]
176
177 if categories:
178 timeline = [p for p in timeline if p.meta('category', lang=lang).lower() in categories]
179
180 if sections:
181 timeline = [p for p in timeline if p.section_name(lang).lower() in sections]
182
183 if tags:
184 tags = {t.strip().lower() for t in tags.split(',')}
185 if require_all_tags:
186 compare = set.issubset
187 else:
188 compare = operator.and_
189 for post in timeline:
190 post_tags = {t.lower() for t in post.tags}
191 if compare(tags, post_tags):
192 filtered_timeline.append(post)
193 else:
194 filtered_timeline = timeline
195
196 if sort:
197 filtered_timeline = natsort.natsorted(filtered_timeline, key=lambda post: post.meta[lang][sort], alg=natsort.ns.F | natsort.ns.IC)
198
199 if date:
200 _now = utils.current_time()
201 filtered_timeline = [p for p in filtered_timeline if date_in_range(utils.html_unescape(date), p.date, now=_now)]
202
203 for post in filtered_timeline[start:stop:step]:
204 if slugs:
205 cont = True
206 for slug in slugs:
207 if slug == post.meta('slug'):
208 cont = False
209
210 if cont:
211 continue
212
213 bp = post.translated_base_path(lang)
214 if os.path.exists(bp) and state:
215 state.document.settings.record_dependencies.add(bp)
216 elif os.path.exists(bp) and self_post:
217 self_post.register_depfile(bp, lang=lang)
218
219 posts += [post]
220
221 if not posts:
222 return '', []
223
224 template_deps = site.template_system.template_deps(template)
225 if state:
226 # Register template as a dependency (Issue #2391)
227 for d in template_deps:
228 state.document.settings.record_dependencies.add(d)
229 elif self_post:
230 for d in template_deps:
231 self_post.register_depfile(d, lang=lang)
232
233 template_data = {
234 'lang': lang,
235 'posts': posts,
236 # Need to provide str, not TranslatableSetting (Issue #2104)
237 'date_format': site.GLOBAL_CONTEXT.get('date_format')[lang],
238 'post_list_id': post_list_id,
239 'messages': site.MESSAGES,
240 '_link': site.link,
241 }
242 output = site.template_system.render_template(
243 template, None, template_data)
244 return output, template_deps
245
246
247 # Request file name from shortcode (Issue #2412)
248 PostListShortcode.handler.nikola_shortcode_pass_filename = True
249
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/shortcode/post_list.py b/nikola/plugins/shortcode/post_list.py
--- a/nikola/plugins/shortcode/post_list.py
+++ b/nikola/plugins/shortcode/post_list.py
@@ -218,9 +218,6 @@
posts += [post]
- if not posts:
- return '', []
-
template_deps = site.template_system.template_deps(template)
if state:
# Register template as a dependency (Issue #2391)
| {"golden_diff": "diff --git a/nikola/plugins/shortcode/post_list.py b/nikola/plugins/shortcode/post_list.py\n--- a/nikola/plugins/shortcode/post_list.py\n+++ b/nikola/plugins/shortcode/post_list.py\n@@ -218,9 +218,6 @@\n \n posts += [post]\n \n- if not posts:\n- return '', []\n-\n template_deps = site.template_system.template_deps(template)\n if state:\n # Register template as a dependency (Issue #2391)\n", "issue": "The post_list plugin prevents 'else' functionality in templates\n<!--\r\nBefore creating an issue:\r\n* make sure you are using an up-to-date version of Nikola\r\n* search for existing issues that might be related\r\n\r\nMake sure to:\r\n* provide information about your environment (below)\r\n* include all the output you get, and any other information related to your problem\r\n\r\nNikola v7.6.4, as provided by Ubuntu, is NOT SUPPORTED.\r\nIf you are using this version, you should upgrade: https://getnikola.com/getting-started.html\r\n-->\r\n\r\n### Environment\r\n\r\n**Python Version:**\r\n\r\n3.7.8\r\n\r\n**Nikola Version:**\r\n\r\n8.1.1\r\n\r\n**Operating System:**\r\n\r\nMac OS Catalina (10.15.5) / Ubuntu 19.10 \r\n\r\n### Description:\r\n\r\nIn the default template for the `post-list` plugin, namely `post_list_directive.tmpl`\r\n\r\n```python\r\n{% if posts %}\r\n <ul class=\"post-list\">\r\n ...\r\n```\r\nWhich suggests that there is some possibility that the template will be called with no posts.\r\n\r\nWhile in `list_post.tmpl`, which you can also use with `post-list`, we have this:\r\n\r\n```python\r\n {% if posts %}\r\n <ul class=\"postlist\">\r\n {% for post in posts %}\r\n <li><time class=\"listdate\" datetime=\"{{ post.formatted_date('webiso') }}\" title=\"{{ post.formatted_date(date_format)|e }}\">{{ post.formatted_date(date_format)|e }}</time> <a href=\"{{ post.permalink() }}\" class=\"listtitle\">{{ post.title()|e }}</a></li>\r\n {% endfor %}\r\n </ul>\r\n {% else %}\r\n <p>{{ messages(\"No posts found.\") }}</p>\r\n {% endif %}\r\n```\r\n\r\nWhich is obviously expected to be able to handle the situation when there are no posts.\r\n\r\nHowever, when the plugin returns no posts, the `else` block is not executed. In fact, it appears that the template is not called at all when no posts are returned. \r\n\r\nThis is because of these lines in `post_list.py`, at around lines 221-222:\r\n\r\n```python\r\n if not posts:\r\n return '', []\r\n```\r\n\r\nIt seems that because the empty values are returned, processing is not passed to the template. 
Removing those lines fixes the problem and allows the template's `else` clause to work.\r\n\r\nI can't see that this change breaks anything else, so I'll submit a pull request for it, unless someone has an objection.\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2013-2020 Udo Spallek, Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Post list shortcode.\"\"\"\n\n\nimport operator\nimport os\nimport uuid\n\nimport natsort\n\nfrom nikola import utils\nfrom nikola.packages.datecond import date_in_range\nfrom nikola.plugin_categories import ShortcodePlugin\n\n\nclass PostListShortcode(ShortcodePlugin):\n \"\"\"Provide a shortcode to create a list of posts.\n\n Post List\n =========\n :Directive Arguments: None.\n :Directive Options: lang, start, stop, reverse, sort, date, tags, categories, sections, slugs, post_type, template, id\n :Directive Content: None.\n\n The posts appearing in the list can be filtered by options.\n *List slicing* is provided with the *start*, *stop* and *reverse* options.\n\n The following not required options are recognized:\n\n ``start`` : integer\n The index of the first post to show.\n A negative value like ``-3`` will show the *last* three posts in the\n post-list.\n Defaults to None.\n\n ``stop`` : integer\n The index of the last post to show.\n A value negative value like ``-1`` will show every post, but not the\n *last* in the post-list.\n Defaults to None.\n\n ``reverse`` : flag\n Reverse the order of the post-list.\n Defaults is to not reverse the order of posts.\n\n ``sort`` : string\n Sort post list by one of each post's attributes, usually ``title`` or a\n custom ``priority``. Defaults to None (chronological sorting).\n\n ``date`` : string\n Show posts that match date range specified by this option. 
Format:\n\n * comma-separated clauses (AND)\n * clause: attribute comparison_operator value (spaces optional)\n * attribute: year, month, day, hour, month, second, weekday, isoweekday; or empty for full datetime\n * comparison_operator: == != <= >= < >\n * value: integer, 'now', 'today', or dateutil-compatible date input\n\n ``tags`` : string [, string...]\n Filter posts to show only posts having at least one of the ``tags``.\n Defaults to None.\n\n ``require_all_tags`` : flag\n Change tag filter behaviour to show only posts that have all specified ``tags``.\n Defaults to False.\n\n ``categories`` : string [, string...]\n Filter posts to show only posts having one of the ``categories``.\n Defaults to None.\n\n ``sections`` : string [, string...]\n Filter posts to show only posts having one of the ``sections``.\n Defaults to None.\n\n ``slugs`` : string [, string...]\n Filter posts to show only posts having at least one of the ``slugs``.\n Defaults to None.\n\n ``post_type`` (or ``type``) : string\n Show only ``posts``, ``pages`` or ``all``.\n Replaces ``all``. Defaults to ``posts``.\n\n ``lang`` : string\n The language of post *titles* and *links*.\n Defaults to default language.\n\n ``template`` : string\n The name of an alternative template to render the post-list.\n Defaults to ``post_list_directive.tmpl``\n\n ``id`` : string\n A manual id for the post list.\n Defaults to a random name composed by 'post_list_' + uuid.uuid4().hex.\n \"\"\"\n\n name = \"post_list\"\n\n def set_site(self, site):\n \"\"\"Set the site.\"\"\"\n super().set_site(site)\n site.register_shortcode('post-list', self.handler)\n\n def handler(self, start=None, stop=None, reverse=False, tags=None, require_all_tags=False, categories=None,\n sections=None, slugs=None, post_type='post', type=False,\n lang=None, template='post_list_directive.tmpl', sort=None,\n id=None, data=None, state=None, site=None, date=None, filename=None, post=None):\n \"\"\"Generate HTML for post-list.\"\"\"\n if lang is None:\n lang = utils.LocaleBorg().current_lang\n if site.invariant: # for testing purposes\n post_list_id = id or 'post_list_' + 'fixedvaluethatisnotauuid'\n else:\n post_list_id = id or 'post_list_' + uuid.uuid4().hex\n\n # Get post from filename if available\n if filename:\n self_post = site.post_per_input_file.get(filename)\n else:\n self_post = None\n\n if self_post:\n self_post.register_depfile(\"####MAGIC####TIMELINE\", lang=lang)\n\n # If we get strings for start/stop, make them integers\n if start is not None:\n start = int(start)\n if stop is not None:\n stop = int(stop)\n\n # Parse tags/categories/sections/slugs (input is strings)\n categories = [c.strip().lower() for c in categories.split(',')] if categories else []\n sections = [s.strip().lower() for s in sections.split(',')] if sections else []\n slugs = [s.strip() for s in slugs.split(',')] if slugs else []\n\n filtered_timeline = []\n posts = []\n step = None if reverse is False else -1\n\n if type is not False:\n post_type = type\n\n if post_type == 'page' or post_type == 'pages':\n timeline = [p for p in site.timeline if not p.use_in_feeds]\n elif post_type == 'all':\n timeline = [p for p in site.timeline]\n else: # post\n timeline = [p for p in site.timeline if p.use_in_feeds]\n\n # self_post should be removed from timeline because this is redundant\n timeline = [p for p in timeline if p.source_path != filename]\n\n if categories:\n timeline = [p for p in timeline if p.meta('category', lang=lang).lower() in categories]\n\n if sections:\n timeline = [p for p in 
timeline if p.section_name(lang).lower() in sections]\n\n if tags:\n tags = {t.strip().lower() for t in tags.split(',')}\n if require_all_tags:\n compare = set.issubset\n else:\n compare = operator.and_\n for post in timeline:\n post_tags = {t.lower() for t in post.tags}\n if compare(tags, post_tags):\n filtered_timeline.append(post)\n else:\n filtered_timeline = timeline\n\n if sort:\n filtered_timeline = natsort.natsorted(filtered_timeline, key=lambda post: post.meta[lang][sort], alg=natsort.ns.F | natsort.ns.IC)\n\n if date:\n _now = utils.current_time()\n filtered_timeline = [p for p in filtered_timeline if date_in_range(utils.html_unescape(date), p.date, now=_now)]\n\n for post in filtered_timeline[start:stop:step]:\n if slugs:\n cont = True\n for slug in slugs:\n if slug == post.meta('slug'):\n cont = False\n\n if cont:\n continue\n\n bp = post.translated_base_path(lang)\n if os.path.exists(bp) and state:\n state.document.settings.record_dependencies.add(bp)\n elif os.path.exists(bp) and self_post:\n self_post.register_depfile(bp, lang=lang)\n\n posts += [post]\n\n if not posts:\n return '', []\n\n template_deps = site.template_system.template_deps(template)\n if state:\n # Register template as a dependency (Issue #2391)\n for d in template_deps:\n state.document.settings.record_dependencies.add(d)\n elif self_post:\n for d in template_deps:\n self_post.register_depfile(d, lang=lang)\n\n template_data = {\n 'lang': lang,\n 'posts': posts,\n # Need to provide str, not TranslatableSetting (Issue #2104)\n 'date_format': site.GLOBAL_CONTEXT.get('date_format')[lang],\n 'post_list_id': post_list_id,\n 'messages': site.MESSAGES,\n '_link': site.link,\n }\n output = site.template_system.render_template(\n template, None, template_data)\n return output, template_deps\n\n\n# Request file name from shortcode (Issue #2412)\nPostListShortcode.handler.nikola_shortcode_pass_filename = True\n", "path": "nikola/plugins/shortcode/post_list.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2013-2020 Udo Spallek, Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"Post list shortcode.\"\"\"\n\n\nimport operator\nimport os\nimport uuid\n\nimport natsort\n\nfrom nikola import utils\nfrom nikola.packages.datecond import date_in_range\nfrom nikola.plugin_categories import ShortcodePlugin\n\n\nclass PostListShortcode(ShortcodePlugin):\n \"\"\"Provide a shortcode to create a list of posts.\n\n Post List\n =========\n :Directive Arguments: None.\n :Directive Options: lang, start, stop, reverse, sort, date, tags, categories, sections, slugs, post_type, template, id\n :Directive Content: None.\n\n The posts appearing in the list can be filtered by options.\n *List slicing* is provided with the *start*, *stop* and *reverse* options.\n\n The following not required options are recognized:\n\n ``start`` : integer\n The index of the first post to show.\n A negative value like ``-3`` will show the *last* three posts in the\n post-list.\n Defaults to None.\n\n ``stop`` : integer\n The index of the last post to show.\n A value negative value like ``-1`` will show every post, but not the\n *last* in the post-list.\n Defaults to None.\n\n ``reverse`` : flag\n Reverse the order of the post-list.\n Defaults is to not reverse the order of posts.\n\n ``sort`` : string\n Sort post list by one of each post's attributes, usually ``title`` or a\n custom ``priority``. Defaults to None (chronological sorting).\n\n ``date`` : string\n Show posts that match date range specified by this option. Format:\n\n * comma-separated clauses (AND)\n * clause: attribute comparison_operator value (spaces optional)\n * attribute: year, month, day, hour, month, second, weekday, isoweekday; or empty for full datetime\n * comparison_operator: == != <= >= < >\n * value: integer, 'now', 'today', or dateutil-compatible date input\n\n ``tags`` : string [, string...]\n Filter posts to show only posts having at least one of the ``tags``.\n Defaults to None.\n\n ``require_all_tags`` : flag\n Change tag filter behaviour to show only posts that have all specified ``tags``.\n Defaults to False.\n\n ``categories`` : string [, string...]\n Filter posts to show only posts having one of the ``categories``.\n Defaults to None.\n\n ``sections`` : string [, string...]\n Filter posts to show only posts having one of the ``sections``.\n Defaults to None.\n\n ``slugs`` : string [, string...]\n Filter posts to show only posts having at least one of the ``slugs``.\n Defaults to None.\n\n ``post_type`` (or ``type``) : string\n Show only ``posts``, ``pages`` or ``all``.\n Replaces ``all``. 
Defaults to ``posts``.\n\n ``lang`` : string\n The language of post *titles* and *links*.\n Defaults to default language.\n\n ``template`` : string\n The name of an alternative template to render the post-list.\n Defaults to ``post_list_directive.tmpl``\n\n ``id`` : string\n A manual id for the post list.\n Defaults to a random name composed by 'post_list_' + uuid.uuid4().hex.\n \"\"\"\n\n name = \"post_list\"\n\n def set_site(self, site):\n \"\"\"Set the site.\"\"\"\n super().set_site(site)\n site.register_shortcode('post-list', self.handler)\n\n def handler(self, start=None, stop=None, reverse=False, tags=None, require_all_tags=False, categories=None,\n sections=None, slugs=None, post_type='post', type=False,\n lang=None, template='post_list_directive.tmpl', sort=None,\n id=None, data=None, state=None, site=None, date=None, filename=None, post=None):\n \"\"\"Generate HTML for post-list.\"\"\"\n if lang is None:\n lang = utils.LocaleBorg().current_lang\n if site.invariant: # for testing purposes\n post_list_id = id or 'post_list_' + 'fixedvaluethatisnotauuid'\n else:\n post_list_id = id or 'post_list_' + uuid.uuid4().hex\n\n # Get post from filename if available\n if filename:\n self_post = site.post_per_input_file.get(filename)\n else:\n self_post = None\n\n if self_post:\n self_post.register_depfile(\"####MAGIC####TIMELINE\", lang=lang)\n\n # If we get strings for start/stop, make them integers\n if start is not None:\n start = int(start)\n if stop is not None:\n stop = int(stop)\n\n # Parse tags/categories/sections/slugs (input is strings)\n categories = [c.strip().lower() for c in categories.split(',')] if categories else []\n sections = [s.strip().lower() for s in sections.split(',')] if sections else []\n slugs = [s.strip() for s in slugs.split(',')] if slugs else []\n\n filtered_timeline = []\n posts = []\n step = None if reverse is False else -1\n\n if type is not False:\n post_type = type\n\n if post_type == 'page' or post_type == 'pages':\n timeline = [p for p in site.timeline if not p.use_in_feeds]\n elif post_type == 'all':\n timeline = [p for p in site.timeline]\n else: # post\n timeline = [p for p in site.timeline if p.use_in_feeds]\n\n # self_post should be removed from timeline because this is redundant\n timeline = [p for p in timeline if p.source_path != filename]\n\n if categories:\n timeline = [p for p in timeline if p.meta('category', lang=lang).lower() in categories]\n\n if sections:\n timeline = [p for p in timeline if p.section_name(lang).lower() in sections]\n\n if tags:\n tags = {t.strip().lower() for t in tags.split(',')}\n if require_all_tags:\n compare = set.issubset\n else:\n compare = operator.and_\n for post in timeline:\n post_tags = {t.lower() for t in post.tags}\n if compare(tags, post_tags):\n filtered_timeline.append(post)\n else:\n filtered_timeline = timeline\n\n if sort:\n filtered_timeline = natsort.natsorted(filtered_timeline, key=lambda post: post.meta[lang][sort], alg=natsort.ns.F | natsort.ns.IC)\n\n if date:\n _now = utils.current_time()\n filtered_timeline = [p for p in filtered_timeline if date_in_range(utils.html_unescape(date), p.date, now=_now)]\n\n for post in filtered_timeline[start:stop:step]:\n if slugs:\n cont = True\n for slug in slugs:\n if slug == post.meta('slug'):\n cont = False\n\n if cont:\n continue\n\n bp = post.translated_base_path(lang)\n if os.path.exists(bp) and state:\n state.document.settings.record_dependencies.add(bp)\n elif os.path.exists(bp) and self_post:\n self_post.register_depfile(bp, lang=lang)\n\n posts += 
[post]\n\n template_deps = site.template_system.template_deps(template)\n if state:\n # Register template as a dependency (Issue #2391)\n for d in template_deps:\n state.document.settings.record_dependencies.add(d)\n elif self_post:\n for d in template_deps:\n self_post.register_depfile(d, lang=lang)\n\n template_data = {\n 'lang': lang,\n 'posts': posts,\n # Need to provide str, not TranslatableSetting (Issue #2104)\n 'date_format': site.GLOBAL_CONTEXT.get('date_format')[lang],\n 'post_list_id': post_list_id,\n 'messages': site.MESSAGES,\n '_link': site.link,\n }\n output = site.template_system.render_template(\n template, None, template_data)\n return output, template_deps\n\n\n# Request file name from shortcode (Issue #2412)\nPostListShortcode.handler.nikola_shortcode_pass_filename = True\n", "path": "nikola/plugins/shortcode/post_list.py"}]} | 3,527 | 112 |
gh_patches_debug_24993 | rasdani/github-patches | git_diff | keras-team__keras-nlp-425 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Naming inconsistency between Gpt2 and XLMRoberta
We handle casing inconsistently in our current naming for `Gpt2` and `XLMRoberta`. We should align on either:
- `XLMRoberta` and `GPT2`
- `XlmRoberta` and `Gpt2`
Once we've decided on the naming we want, this can just be a simple rename. We should probably do this before the next major release.
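If backwards compatibility matters, the rename also doesn't have to be breaking. Purely as a sketch — not something this issue proposes, and using a stand-in class rather than the real keras_nlp import — a deprecated alias could keep the old name working for one release:

```python
import warnings

class GPT2:
    """Stand-in for the renamed backbone class (keras_nlp.models.GPT2 in the real library)."""

    def __init__(self, *args, **kwargs):
        pass

def Gpt2(*args, **kwargs):
    """Hypothetical deprecated alias retained temporarily after the rename."""
    warnings.warn("Gpt2 has been renamed to GPT2", DeprecationWarning, stacklevel=2)
    return GPT2(*args, **kwargs)
```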
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras_nlp/models/__init__.py`
Content:
```
1 # Copyright 2022 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from keras_nlp.models.bert.bert_models import Bert
16 from keras_nlp.models.bert.bert_preprocessing import BertPreprocessor
17 from keras_nlp.models.bert.bert_tasks import BertClassifier
18 from keras_nlp.models.distilbert.distilbert_models import DistilBert
19 from keras_nlp.models.distilbert.distilbert_preprocessing import (
20 DistilBertPreprocessor,
21 )
22 from keras_nlp.models.gpt2.gpt2_models import Gpt2
23 from keras_nlp.models.roberta.roberta_models import Roberta
24 from keras_nlp.models.roberta.roberta_tasks import RobertaClassifier
25 from keras_nlp.models.xlm_roberta.xlm_roberta_models import XLMRoberta
26 from keras_nlp.models.xlm_roberta.xlm_roberta_preprocessing import (
27 XLMRobertaPreprocessor,
28 )
29
```
Path: `keras_nlp/models/gpt2/gpt2_models.py`
Content:
```
1 # Copyright 2022 The KerasNLP Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """GPT-2 backbone models."""
16
17 import tensorflow as tf
18 from tensorflow import keras
19
20 from keras_nlp.layers import PositionEmbedding
21 from keras_nlp.layers import TransformerDecoder
22
23
24 def _gpt_2_kernel_initializer(stddev=0.02):
25 return keras.initializers.RandomNormal(stddev=stddev)
26
27
28 @keras.utils.register_keras_serializable(package="keras_nlp")
29 class Gpt2(keras.Model):
30 """GPT-2 core network with hyperparameters.
31
32 This network implements a Transformer-based decoder network,
33 Generative Pretrained Transformer-2 (GPT-2), as described in
34 ["Language Models are Unsupervised Multitask Learners"](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf).
35 It includes the embedding lookups and transformer layers.
36
37 The default constructor gives a fully customizable, randomly initalized
38 GPT-2 model with any number of layers, heads, and embedding
39 dimensions. To load preset architectures and weights, use the `from_presets`
40 constructor.
41
42 Args:
43 vocabulary_size: int. The size of the token vocabulary.
44 num_layers: int. The number of transformer layers.
45 num_heads: int. The number of attention heads for each transformer.
46 The hidden size must be divisible by the number of attention heads.
47 hidden_dim: int. The size of the transformer encoding and pooler layers.
48 intermediate_dim: int. The output dimension of the first Dense layer in
49 a two-layer feedforward network for each transformer.
50 dropout: float. Dropout probability for the Transformer encoder.
51 max_sequence_length: int. The maximum sequence length that this encoder
52 can consume. If None, `max_sequence_length` uses the value from
53 sequence length. This determines the variable shape for positional
54 embeddings.
55
56 Example usage:
57 ```python
58 input_data = {
59 "token_ids": tf.random.uniform(
60 shape=(1, 12), dtype=tf.int64, maxval=model.vocabulary_size
61 ),
62 "padding_mask": tf.constant(
63 [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)
64 ),
65 }
66
67 # Randomly initialized GPT-2 decoder
68 model = keras_nlp.models.Gpt2(
69 vocabulary_size=50257,
70 num_layers=12,
71 num_heads=12,
72 hidden_dim=768,
73 intermediate_dim=3072,
74 max_sequence_length=1024,
75 )
76
77 # Call the model on the input data.
78 output = model(input_data)
79 ```
80 """
81
82 def __init__(
83 self,
84 vocabulary_size,
85 num_layers,
86 num_heads,
87 hidden_dim,
88 intermediate_dim,
89 dropout=0.1,
90 max_sequence_length=1024,
91 **kwargs,
92 ):
93
94 # Inputs
95 token_ids = keras.Input(shape=(None,), dtype="int32", name="token_ids")
96 padding_mask = keras.Input(
97 shape=(None,), dtype="int32", name="padding_mask"
98 )
99
100 # Embed tokens, positions.
101 token_embedding = keras.layers.Embedding(
102 input_dim=vocabulary_size,
103 output_dim=hidden_dim,
104 embeddings_initializer=_gpt_2_kernel_initializer(stddev=0.01),
105 name="token_embedding",
106 )(token_ids)
107
108 # Can't use `TokenAndPositionEmbedding` layer here because of different
109 # initializers.
110 position_embedding = PositionEmbedding(
111 initializer=_gpt_2_kernel_initializer(stddev=0.02),
112 sequence_length=max_sequence_length,
113 name="position_embedding",
114 )(token_embedding)
115
116 # Sum and apply dropout to embeddings.
117 x = keras.layers.Add()((token_embedding, position_embedding))
118 x = keras.layers.Dropout(
119 dropout,
120 name="embeddings_dropout",
121 )(x)
122
123 # Apply successive transformer decoder blocks.
124 for i in range(num_layers):
125 x = TransformerDecoder(
126 intermediate_dim=intermediate_dim,
127 num_heads=num_heads,
128 dropout=dropout,
129 activation=lambda x: keras.activations.gelu(
130 x, approximate=True
131 ),
132 layer_norm_epsilon=1e-05,
133 kernel_initializer=_gpt_2_kernel_initializer(stddev=0.02),
134 normalize_first=True,
135 name=f"transformer_layer_{i}",
136 )(x, decoder_padding_mask=padding_mask)
137
138 sequence_output = keras.layers.LayerNormalization(
139 name="layer_norm",
140 axis=-1,
141 epsilon=1e-05,
142 dtype=tf.float32,
143 )(x)
144
145 # Set default for `name` if none given
146 if "name" not in kwargs:
147 kwargs["name"] = "backbone"
148
149 # Instantiate using Functional API Model constructor
150 super().__init__(
151 inputs={
152 "token_ids": token_ids,
153 "padding_mask": padding_mask,
154 },
155 outputs=sequence_output,
156 **kwargs,
157 )
158 # All references to `self` below this line
159 self.vocabulary_size = vocabulary_size
160 self.num_layers = num_layers
161 self.num_heads = num_heads
162 self.hidden_dim = hidden_dim
163 self.intermediate_dim = intermediate_dim
164 self.dropout = dropout
165 self.max_sequence_length = max_sequence_length
166
167 def get_config(self):
168 return {
169 "vocabulary_size": self.vocabulary_size,
170 "num_layers": self.num_layers,
171 "num_heads": self.num_heads,
172 "hidden_dim": self.hidden_dim,
173 "intermediate_dim": self.intermediate_dim,
174 "dropout": self.dropout,
175 "max_sequence_length": self.max_sequence_length,
176 "name": self.name,
177 "trainable": self.trainable,
178 }
179
180 @classmethod
181 def from_config(cls, config):
182 return cls(**config)
183
184 @classmethod
185 def from_preset(
186 cls,
187 preset,
188 load_weights=True,
189 **kwargs,
190 ):
191 raise NotImplementedError
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras_nlp/models/__init__.py b/keras_nlp/models/__init__.py
--- a/keras_nlp/models/__init__.py
+++ b/keras_nlp/models/__init__.py
@@ -19,7 +19,7 @@
from keras_nlp.models.distilbert.distilbert_preprocessing import (
DistilBertPreprocessor,
)
-from keras_nlp.models.gpt2.gpt2_models import Gpt2
+from keras_nlp.models.gpt2.gpt2_models import GPT2
from keras_nlp.models.roberta.roberta_models import Roberta
from keras_nlp.models.roberta.roberta_tasks import RobertaClassifier
from keras_nlp.models.xlm_roberta.xlm_roberta_models import XLMRoberta
diff --git a/keras_nlp/models/gpt2/gpt2_models.py b/keras_nlp/models/gpt2/gpt2_models.py
--- a/keras_nlp/models/gpt2/gpt2_models.py
+++ b/keras_nlp/models/gpt2/gpt2_models.py
@@ -26,7 +26,7 @@
@keras.utils.register_keras_serializable(package="keras_nlp")
-class Gpt2(keras.Model):
+class GPT2(keras.Model):
"""GPT-2 core network with hyperparameters.
This network implements a Transformer-based decoder network,
@@ -65,7 +65,7 @@
}
# Randomly initialized GPT-2 decoder
- model = keras_nlp.models.Gpt2(
+ model = keras_nlp.models.GPT2(
vocabulary_size=50257,
num_layers=12,
num_heads=12,
| {"golden_diff": "diff --git a/keras_nlp/models/__init__.py b/keras_nlp/models/__init__.py\n--- a/keras_nlp/models/__init__.py\n+++ b/keras_nlp/models/__init__.py\n@@ -19,7 +19,7 @@\n from keras_nlp.models.distilbert.distilbert_preprocessing import (\n DistilBertPreprocessor,\n )\n-from keras_nlp.models.gpt2.gpt2_models import Gpt2\n+from keras_nlp.models.gpt2.gpt2_models import GPT2\n from keras_nlp.models.roberta.roberta_models import Roberta\n from keras_nlp.models.roberta.roberta_tasks import RobertaClassifier\n from keras_nlp.models.xlm_roberta.xlm_roberta_models import XLMRoberta\ndiff --git a/keras_nlp/models/gpt2/gpt2_models.py b/keras_nlp/models/gpt2/gpt2_models.py\n--- a/keras_nlp/models/gpt2/gpt2_models.py\n+++ b/keras_nlp/models/gpt2/gpt2_models.py\n@@ -26,7 +26,7 @@\n \n \n @keras.utils.register_keras_serializable(package=\"keras_nlp\")\n-class Gpt2(keras.Model):\n+class GPT2(keras.Model):\n \"\"\"GPT-2 core network with hyperparameters.\n \n This network implements a Transformer-based decoder network,\n@@ -65,7 +65,7 @@\n }\n \n # Randomly initialized GPT-2 decoder\n- model = keras_nlp.models.Gpt2(\n+ model = keras_nlp.models.GPT2(\n vocabulary_size=50257,\n num_layers=12,\n num_heads=12,\n", "issue": "Naming inconsistency between Gpt2 and XLMRoberta\nWe handle case differently in our current naming for `Gpt2` and `XLMRoberta`. We should align on either:\r\n - `XLMRoberta` and `GPT2`\r\n - `XlmRoberta` and `Gpt2`\r\n\r\nOnce we decided on the naming we want, this can just be a simple rename. We should probably do this before the next major release.\n", "before_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom keras_nlp.models.bert.bert_models import Bert\nfrom keras_nlp.models.bert.bert_preprocessing import BertPreprocessor\nfrom keras_nlp.models.bert.bert_tasks import BertClassifier\nfrom keras_nlp.models.distilbert.distilbert_models import DistilBert\nfrom keras_nlp.models.distilbert.distilbert_preprocessing import (\n DistilBertPreprocessor,\n)\nfrom keras_nlp.models.gpt2.gpt2_models import Gpt2\nfrom keras_nlp.models.roberta.roberta_models import Roberta\nfrom keras_nlp.models.roberta.roberta_tasks import RobertaClassifier\nfrom keras_nlp.models.xlm_roberta.xlm_roberta_models import XLMRoberta\nfrom keras_nlp.models.xlm_roberta.xlm_roberta_preprocessing import (\n XLMRobertaPreprocessor,\n)\n", "path": "keras_nlp/models/__init__.py"}, {"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# 
limitations under the License.\n\n\"\"\"GPT-2 backbone models.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom keras_nlp.layers import PositionEmbedding\nfrom keras_nlp.layers import TransformerDecoder\n\n\ndef _gpt_2_kernel_initializer(stddev=0.02):\n return keras.initializers.RandomNormal(stddev=stddev)\n\n\[email protected]_keras_serializable(package=\"keras_nlp\")\nclass Gpt2(keras.Model):\n \"\"\"GPT-2 core network with hyperparameters.\n\n This network implements a Transformer-based decoder network,\n Generative Pretrained Transformer-2 (GPT-2), as described in\n [\"Language Models are Unsupervised Multitask Learners\"](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf).\n It includes the embedding lookups and transformer layers.\n\n The default constructor gives a fully customizable, randomly initalized\n GPT-2 model with any number of layers, heads, and embedding\n dimensions. To load preset architectures and weights, use the `from_presets`\n constructor.\n\n Args:\n vocabulary_size: int. The size of the token vocabulary.\n num_layers: int. The number of transformer layers.\n num_heads: int. The number of attention heads for each transformer.\n The hidden size must be divisible by the number of attention heads.\n hidden_dim: int. The size of the transformer encoding and pooler layers.\n intermediate_dim: int. The output dimension of the first Dense layer in\n a two-layer feedforward network for each transformer.\n dropout: float. Dropout probability for the Transformer encoder.\n max_sequence_length: int. The maximum sequence length that this encoder\n can consume. If None, `max_sequence_length` uses the value from\n sequence length. This determines the variable shape for positional\n embeddings.\n\n Example usage:\n ```python\n input_data = {\n \"token_ids\": tf.random.uniform(\n shape=(1, 12), dtype=tf.int64, maxval=model.vocabulary_size\n ),\n \"padding_mask\": tf.constant(\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)\n ),\n }\n\n # Randomly initialized GPT-2 decoder\n model = keras_nlp.models.Gpt2(\n vocabulary_size=50257,\n num_layers=12,\n num_heads=12,\n hidden_dim=768,\n intermediate_dim=3072,\n max_sequence_length=1024,\n )\n\n # Call the model on the input data.\n output = model(input_data)\n ```\n \"\"\"\n\n def __init__(\n self,\n vocabulary_size,\n num_layers,\n num_heads,\n hidden_dim,\n intermediate_dim,\n dropout=0.1,\n max_sequence_length=1024,\n **kwargs,\n ):\n\n # Inputs\n token_ids = keras.Input(shape=(None,), dtype=\"int32\", name=\"token_ids\")\n padding_mask = keras.Input(\n shape=(None,), dtype=\"int32\", name=\"padding_mask\"\n )\n\n # Embed tokens, positions.\n token_embedding = keras.layers.Embedding(\n input_dim=vocabulary_size,\n output_dim=hidden_dim,\n embeddings_initializer=_gpt_2_kernel_initializer(stddev=0.01),\n name=\"token_embedding\",\n )(token_ids)\n\n # Can't use `TokenAndPositionEmbedding` layer here because of different\n # initializers.\n position_embedding = PositionEmbedding(\n initializer=_gpt_2_kernel_initializer(stddev=0.02),\n sequence_length=max_sequence_length,\n name=\"position_embedding\",\n )(token_embedding)\n\n # Sum and apply dropout to embeddings.\n x = keras.layers.Add()((token_embedding, position_embedding))\n x = keras.layers.Dropout(\n dropout,\n name=\"embeddings_dropout\",\n )(x)\n\n # Apply successive transformer decoder blocks.\n for i in range(num_layers):\n x = TransformerDecoder(\n intermediate_dim=intermediate_dim,\n 
num_heads=num_heads,\n dropout=dropout,\n activation=lambda x: keras.activations.gelu(\n x, approximate=True\n ),\n layer_norm_epsilon=1e-05,\n kernel_initializer=_gpt_2_kernel_initializer(stddev=0.02),\n normalize_first=True,\n name=f\"transformer_layer_{i}\",\n )(x, decoder_padding_mask=padding_mask)\n\n sequence_output = keras.layers.LayerNormalization(\n name=\"layer_norm\",\n axis=-1,\n epsilon=1e-05,\n dtype=tf.float32,\n )(x)\n\n # Set default for `name` if none given\n if \"name\" not in kwargs:\n kwargs[\"name\"] = \"backbone\"\n\n # Instantiate using Functional API Model constructor\n super().__init__(\n inputs={\n \"token_ids\": token_ids,\n \"padding_mask\": padding_mask,\n },\n outputs=sequence_output,\n **kwargs,\n )\n # All references to `self` below this line\n self.vocabulary_size = vocabulary_size\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.hidden_dim = hidden_dim\n self.intermediate_dim = intermediate_dim\n self.dropout = dropout\n self.max_sequence_length = max_sequence_length\n\n def get_config(self):\n return {\n \"vocabulary_size\": self.vocabulary_size,\n \"num_layers\": self.num_layers,\n \"num_heads\": self.num_heads,\n \"hidden_dim\": self.hidden_dim,\n \"intermediate_dim\": self.intermediate_dim,\n \"dropout\": self.dropout,\n \"max_sequence_length\": self.max_sequence_length,\n \"name\": self.name,\n \"trainable\": self.trainable,\n }\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n\n @classmethod\n def from_preset(\n cls,\n preset,\n load_weights=True,\n **kwargs,\n ):\n raise NotImplementedError\n", "path": "keras_nlp/models/gpt2/gpt2_models.py"}], "after_files": [{"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom keras_nlp.models.bert.bert_models import Bert\nfrom keras_nlp.models.bert.bert_preprocessing import BertPreprocessor\nfrom keras_nlp.models.bert.bert_tasks import BertClassifier\nfrom keras_nlp.models.distilbert.distilbert_models import DistilBert\nfrom keras_nlp.models.distilbert.distilbert_preprocessing import (\n DistilBertPreprocessor,\n)\nfrom keras_nlp.models.gpt2.gpt2_models import GPT2\nfrom keras_nlp.models.roberta.roberta_models import Roberta\nfrom keras_nlp.models.roberta.roberta_tasks import RobertaClassifier\nfrom keras_nlp.models.xlm_roberta.xlm_roberta_models import XLMRoberta\nfrom keras_nlp.models.xlm_roberta.xlm_roberta_preprocessing import (\n XLMRobertaPreprocessor,\n)\n", "path": "keras_nlp/models/__init__.py"}, {"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the 
License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"GPT-2 backbone models.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom keras_nlp.layers import PositionEmbedding\nfrom keras_nlp.layers import TransformerDecoder\n\n\ndef _gpt_2_kernel_initializer(stddev=0.02):\n return keras.initializers.RandomNormal(stddev=stddev)\n\n\[email protected]_keras_serializable(package=\"keras_nlp\")\nclass GPT2(keras.Model):\n \"\"\"GPT-2 core network with hyperparameters.\n\n This network implements a Transformer-based decoder network,\n Generative Pretrained Transformer-2 (GPT-2), as described in\n [\"Language Models are Unsupervised Multitask Learners\"](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf).\n It includes the embedding lookups and transformer layers.\n\n The default constructor gives a fully customizable, randomly initalized\n GPT-2 model with any number of layers, heads, and embedding\n dimensions. To load preset architectures and weights, use the `from_presets`\n constructor.\n\n Args:\n vocabulary_size: int. The size of the token vocabulary.\n num_layers: int. The number of transformer layers.\n num_heads: int. The number of attention heads for each transformer.\n The hidden size must be divisible by the number of attention heads.\n hidden_dim: int. The size of the transformer encoding and pooler layers.\n intermediate_dim: int. The output dimension of the first Dense layer in\n a two-layer feedforward network for each transformer.\n dropout: float. Dropout probability for the Transformer encoder.\n max_sequence_length: int. The maximum sequence length that this encoder\n can consume. If None, `max_sequence_length` uses the value from\n sequence length. 
This determines the variable shape for positional\n embeddings.\n\n Example usage:\n ```python\n input_data = {\n \"token_ids\": tf.random.uniform(\n shape=(1, 12), dtype=tf.int64, maxval=model.vocabulary_size\n ),\n \"padding_mask\": tf.constant(\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0], shape=(1, 12)\n ),\n }\n\n # Randomly initialized GPT-2 decoder\n model = keras_nlp.models.GPT2(\n vocabulary_size=50257,\n num_layers=12,\n num_heads=12,\n hidden_dim=768,\n intermediate_dim=3072,\n max_sequence_length=1024,\n )\n\n # Call the model on the input data.\n output = model(input_data)\n ```\n \"\"\"\n\n def __init__(\n self,\n vocabulary_size,\n num_layers,\n num_heads,\n hidden_dim,\n intermediate_dim,\n dropout=0.1,\n max_sequence_length=1024,\n **kwargs,\n ):\n\n # Inputs\n token_ids = keras.Input(shape=(None,), dtype=\"int32\", name=\"token_ids\")\n padding_mask = keras.Input(\n shape=(None,), dtype=\"int32\", name=\"padding_mask\"\n )\n\n # Embed tokens, positions.\n token_embedding = keras.layers.Embedding(\n input_dim=vocabulary_size,\n output_dim=hidden_dim,\n embeddings_initializer=_gpt_2_kernel_initializer(stddev=0.01),\n name=\"token_embedding\",\n )(token_ids)\n\n # Can't use `TokenAndPositionEmbedding` layer here because of different\n # initializers.\n position_embedding = PositionEmbedding(\n initializer=_gpt_2_kernel_initializer(stddev=0.02),\n sequence_length=max_sequence_length,\n name=\"position_embedding\",\n )(token_embedding)\n\n # Sum and apply dropout to embeddings.\n x = keras.layers.Add()((token_embedding, position_embedding))\n x = keras.layers.Dropout(\n dropout,\n name=\"embeddings_dropout\",\n )(x)\n\n # Apply successive transformer decoder blocks.\n for i in range(num_layers):\n x = TransformerDecoder(\n intermediate_dim=intermediate_dim,\n num_heads=num_heads,\n dropout=dropout,\n activation=lambda x: keras.activations.gelu(\n x, approximate=True\n ),\n layer_norm_epsilon=1e-05,\n kernel_initializer=_gpt_2_kernel_initializer(stddev=0.02),\n normalize_first=True,\n name=f\"transformer_layer_{i}\",\n )(x, decoder_padding_mask=padding_mask)\n\n sequence_output = keras.layers.LayerNormalization(\n name=\"layer_norm\",\n axis=-1,\n epsilon=1e-05,\n dtype=tf.float32,\n )(x)\n\n # Set default for `name` if none given\n if \"name\" not in kwargs:\n kwargs[\"name\"] = \"backbone\"\n\n # Instantiate using Functional API Model constructor\n super().__init__(\n inputs={\n \"token_ids\": token_ids,\n \"padding_mask\": padding_mask,\n },\n outputs=sequence_output,\n **kwargs,\n )\n # All references to `self` below this line\n self.vocabulary_size = vocabulary_size\n self.num_layers = num_layers\n self.num_heads = num_heads\n self.hidden_dim = hidden_dim\n self.intermediate_dim = intermediate_dim\n self.dropout = dropout\n self.max_sequence_length = max_sequence_length\n\n def get_config(self):\n return {\n \"vocabulary_size\": self.vocabulary_size,\n \"num_layers\": self.num_layers,\n \"num_heads\": self.num_heads,\n \"hidden_dim\": self.hidden_dim,\n \"intermediate_dim\": self.intermediate_dim,\n \"dropout\": self.dropout,\n \"max_sequence_length\": self.max_sequence_length,\n \"name\": self.name,\n \"trainable\": self.trainable,\n }\n\n @classmethod\n def from_config(cls, config):\n return cls(**config)\n\n @classmethod\n def from_preset(\n cls,\n preset,\n load_weights=True,\n **kwargs,\n ):\n raise NotImplementedError\n", "path": "keras_nlp/models/gpt2/gpt2_models.py"}]} | 2,683 | 389 |
gh_patches_debug_2783 | rasdani/github-patches | git_diff | chainer__chainer-524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error type mismatch when broadcast fails
When broadcast fails, numpy causes `ValueError`, but cupy causes `RuntimeError`.
--- END ISSUE ---
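For reference, a minimal sketch of the mismatch described above (not part of the repository; the array shapes are arbitrary examples): NumPy rejects incompatible shapes with `ValueError`, while the pre-fix `cupy.broadcast` shown below raised `RuntimeError('Broadcasting failed')`.

```python
import numpy as np

a = np.ones((3, 4))
b = np.ones((5,))

try:
    np.broadcast_arrays(a, b)  # trailing dimensions 4 and 5 are incompatible
except ValueError as exc:
    print("numpy raises ValueError:", exc)

# The equivalent call through the cupy code below instead raised
# RuntimeError('Broadcasting failed'), which is the mismatch this issue reports.
```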
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/manipulation/dims.py`
Content:
```
1 import six
2
3 import cupy
4 from cupy import internal
5
6
7 zip_longest = six.moves.zip_longest
8 six_zip = six.moves.zip
9
10
11 def atleast_1d(*arys):
12 """Converts arrays to arrays with dimensions >= 1.
13
14 Args:
15 arys (tuple of arrays): Arrays to be converted. All arguments must be
16 cupy.ndarray objects. Only zero-dimensional array is affected.
17
18 Returns:
19 If there are only one input, then it returns its converted version.
20 Otherwise, it returns a list of converted arrays.
21
22 .. seealso:: :func:`numpy.atleast_1d`
23
24 """
25 res = []
26 for a in arys:
27 if not isinstance(a, cupy.ndarray):
28 raise TypeError('Only cupy arrays can be atleast_1d')
29 if a.ndim == 0:
30 a = a.reshape(1)
31 res.append(a)
32 if len(res) == 1:
33 res = res[0]
34 return res
35
36
37 def atleast_2d(*arys):
38 """Converts arrays to arrays with dimensions >= 2.
39
40 If an input array has dimensions less than two, then this function inserts
41 new axes at the head of dimensions to make it have two dimensions.
42
43 Args:
44 arys (tuple of arrays): Arrays to be converted. All arguments must be
45 cupy.ndarray objects.
46
47 Returns:
48 If there are only one input, then it returns its converted version.
49 Otherwise, it returns a list of converted arrays.
50
51 .. seealso:: :func:`numpy.atleast_2d`
52
53 """
54 res = []
55 for a in arys:
56 if not isinstance(a, cupy.ndarray):
57 raise TypeError('Only cupy arrays can be atleast_2d')
58 if a.ndim == 0:
59 a = a.reshape(1, 1)
60 elif a.ndim == 1:
61 a = a[cupy.newaxis, :]
62 res.append(a)
63 if len(res) == 1:
64 res = res[0]
65 return res
66
67
68 def atleast_3d(*arys):
69 """Converts arrays to arrays with dimensions >= 3.
70
71 If an input array has dimensions less than three, then this function
72 inserts new axes to make it have three dimensions. The place of the new
73 axes are following:
74
75 - If its shape is ``()``, then the shape of output is ``(1, 1, 1)``.
76 - If its shape is ``(N,)``, then the shape of output is ``(1, N, 1)``.
77 - If its shape is ``(M, N)``, then the shape of output is ``(M, N, 1)``.
78 - Otherwise, the output is the input array itself.
79
80 Args:
81 arys (tuple of arrays): Arrays to be converted. All arguments must be
82 cupy.ndarray objects.
83
84 Returns:
85 If there are only one input, then it returns its converted version.
86 Otherwise, it returns a list of converted arrays.
87
88 .. seealso:: :func:`numpy.atleast_3d`
89
90 """
91 res = []
92 for a in arys:
93 if not isinstance(a, cupy.ndarray):
94 raise TypeError('Only cupy arrays can be atleast_3d')
95 if a.ndim == 0:
96 a = a.reshape(1, 1, 1)
97 elif a.ndim == 1:
98 a = a[cupy.newaxis, :, cupy.newaxis]
99 elif a.ndim == 2:
100 a = a[:, :, cupy.newaxis]
101 res.append(a)
102 if len(res) == 1:
103 res = res[0]
104 return res
105
106
107 class broadcast(object):
108 """Object that performs broadcasting.
109
110 CuPy actually uses this class to support broadcasting in various
111 operations. Note that this class does not provide an iterator.
112
113 Args:
114 arrays (tuple of arrays): Arrays to be broadcasted.
115
116 Attributes:
117 shape (tuple of ints): The broadcasted shape.
118 nd (int): Number of dimensions of the broadcasted shape.
119 size (int): Total size of the broadcasted shape.
120 values (list of arrays): The broadcasted arrays.
121
122 .. seealso:: :class:`numpy.broadcast`
123
124 """
125
126 def __init__(self, *arrays):
127 ndarray = cupy.ndarray
128 rev = slice(None, None, -1)
129 shape_arr = [a._shape[rev] for a in arrays
130 if isinstance(a, ndarray)]
131 r_shape = [max(ss) for ss in zip_longest(*shape_arr, fillvalue=0)]
132
133 self.shape = shape = tuple(r_shape[rev])
134 self.size = size = internal.prod(shape)
135 self.nd = ndim = len(shape)
136
137 broadcasted = list(arrays)
138 for i, a in enumerate(broadcasted):
139 if not isinstance(a, ndarray):
140 continue
141
142 a_shape = a.shape
143 if a_shape == shape:
144 continue
145
146 r_strides = [
147 a_st if sh == a_sh else (0 if a_sh == 1 else None)
148 for sh, a_sh, a_st
149 in six_zip(r_shape, a._shape[rev], a._strides[rev])]
150
151 if None in r_strides:
152 raise RuntimeError('Broadcasting failed')
153
154 offset = (0,) * (ndim - len(r_strides))
155
156 broadcasted[i] = view = a.view()
157 view._shape = shape
158 view._strides = offset + tuple(r_strides[rev])
159 view._size = size
160 view._c_contiguous = -1
161 view._f_contiguous = -1
162
163 self.values = tuple(broadcasted)
164
165
166 def broadcast_arrays(*args):
167 """Broadcasts given arrays.
168
169 Args:
170 args (tuple of arrays): Arrays to broadcast for each other.
171
172 Returns:
173 list: A list of broadcasted arrays.
174
175 .. seealso:: :func:`numpy.broadcast_arrays`
176
177 """
178 return broadcast(*args).values
179
180
181 def expand_dims(a, axis):
182 """Expands given arrays.
183
184 Args:
185 a (cupy.ndarray): Array to be expanded.
186 axis (int): Position where new axis is to be inserted.
187
188 Returns:
189 cupy.ndarray: The number of dimensions is one greater than that of
190 the input array.
191
192 .. seealso:: :func:`numpy.expand_dims`
193
194 """
195 shape = a.shape
196 if axis < 0:
197 axis = axis + len(shape) + 1
198 return a.reshape(shape[:axis] + (1,) + shape[axis:])
199
200
201 def squeeze(a, axis=None):
202 """Removes size-one axes from the shape of an array.
203
204 Args:
205 a (cupy.ndarray): Array to be reshaped.
206 axis (int or tuple of ints): Axes to be removed. This function removes
207 all size-one axes by default. If one of the specified axes is not
208 of size one, an exception is raised.
209
210 Returns:
211 cupy.ndarray: An array without (specified) size-one axes.
212
213 .. seealso:: :func:`numpy.squeeze`
214
215 """
216 if axis is None:
217 axis = tuple(i for i, n in enumerate(a._shape) if n == 1)
218 elif isinstance(axis, int):
219 axis = axis,
220
221 new_shape = []
222 new_strides = []
223 j = 0
224 for i, n in enumerate(a._shape):
225 if j < len(axis) and i == axis[j]:
226 if n != 1:
227 raise RuntimeError('Cannot squeeze dimension of size > 1')
228 j += 1
229 else:
230 new_shape.append(n)
231 new_strides.append(a._strides[i])
232
233 v = a.view()
234 v._shape = tuple(new_shape)
235 v._strides = tuple(new_strides)
236 v._c_contiguous = -1
237 v._f_contiguous = -1
238 return v
239
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/manipulation/dims.py b/cupy/manipulation/dims.py
--- a/cupy/manipulation/dims.py
+++ b/cupy/manipulation/dims.py
@@ -149,7 +149,7 @@
in six_zip(r_shape, a._shape[rev], a._strides[rev])]
if None in r_strides:
- raise RuntimeError('Broadcasting failed')
+ raise ValueError('Broadcasting failed')
offset = (0,) * (ndim - len(r_strides))
| {"golden_diff": "diff --git a/cupy/manipulation/dims.py b/cupy/manipulation/dims.py\n--- a/cupy/manipulation/dims.py\n+++ b/cupy/manipulation/dims.py\n@@ -149,7 +149,7 @@\n in six_zip(r_shape, a._shape[rev], a._strides[rev])]\n \n if None in r_strides:\n- raise RuntimeError('Broadcasting failed')\n+ raise ValueError('Broadcasting failed')\n \n offset = (0,) * (ndim - len(r_strides))\n", "issue": "Error type mismatch when broadcast fails\nWhen broadcast fails, numpy causes `ValueError`, but cupy causes `RuntimeError`.\n\n", "before_files": [{"content": "import six\n\nimport cupy\nfrom cupy import internal\n\n\nzip_longest = six.moves.zip_longest\nsix_zip = six.moves.zip\n\n\ndef atleast_1d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 1.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects. Only zero-dimensional array is affected.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_1d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_1d')\n if a.ndim == 0:\n a = a.reshape(1)\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\ndef atleast_2d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 2.\n\n If an input array has dimensions less than two, then this function inserts\n new axes at the head of dimensions to make it have two dimensions.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_2d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_2d')\n if a.ndim == 0:\n a = a.reshape(1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\ndef atleast_3d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 3.\n\n If an input array has dimensions less than three, then this function\n inserts new axes to make it have three dimensions. The place of the new\n axes are following:\n\n - If its shape is ``()``, then the shape of output is ``(1, 1, 1)``.\n - If its shape is ``(N,)``, then the shape of output is ``(1, N, 1)``.\n - If its shape is ``(M, N)``, then the shape of output is ``(M, N, 1)``.\n - Otherwise, the output is the input array itself.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_3d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_3d')\n if a.ndim == 0:\n a = a.reshape(1, 1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :, cupy.newaxis]\n elif a.ndim == 2:\n a = a[:, :, cupy.newaxis]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\nclass broadcast(object):\n \"\"\"Object that performs broadcasting.\n\n CuPy actually uses this class to support broadcasting in various\n operations. 
Note that this class does not provide an iterator.\n\n Args:\n arrays (tuple of arrays): Arrays to be broadcasted.\n\n Attributes:\n shape (tuple of ints): The broadcasted shape.\n nd (int): Number of dimensions of the broadcasted shape.\n size (int): Total size of the broadcasted shape.\n values (list of arrays): The broadcasted arrays.\n\n .. seealso:: :class:`numpy.broadcast`\n\n \"\"\"\n\n def __init__(self, *arrays):\n ndarray = cupy.ndarray\n rev = slice(None, None, -1)\n shape_arr = [a._shape[rev] for a in arrays\n if isinstance(a, ndarray)]\n r_shape = [max(ss) for ss in zip_longest(*shape_arr, fillvalue=0)]\n\n self.shape = shape = tuple(r_shape[rev])\n self.size = size = internal.prod(shape)\n self.nd = ndim = len(shape)\n\n broadcasted = list(arrays)\n for i, a in enumerate(broadcasted):\n if not isinstance(a, ndarray):\n continue\n\n a_shape = a.shape\n if a_shape == shape:\n continue\n\n r_strides = [\n a_st if sh == a_sh else (0 if a_sh == 1 else None)\n for sh, a_sh, a_st\n in six_zip(r_shape, a._shape[rev], a._strides[rev])]\n\n if None in r_strides:\n raise RuntimeError('Broadcasting failed')\n\n offset = (0,) * (ndim - len(r_strides))\n\n broadcasted[i] = view = a.view()\n view._shape = shape\n view._strides = offset + tuple(r_strides[rev])\n view._size = size\n view._c_contiguous = -1\n view._f_contiguous = -1\n\n self.values = tuple(broadcasted)\n\n\ndef broadcast_arrays(*args):\n \"\"\"Broadcasts given arrays.\n\n Args:\n args (tuple of arrays): Arrays to broadcast for each other.\n\n Returns:\n list: A list of broadcasted arrays.\n\n .. seealso:: :func:`numpy.broadcast_arrays`\n\n \"\"\"\n return broadcast(*args).values\n\n\ndef expand_dims(a, axis):\n \"\"\"Expands given arrays.\n\n Args:\n a (cupy.ndarray): Array to be expanded.\n axis (int): Position where new axis is to be inserted.\n\n Returns:\n cupy.ndarray: The number of dimensions is one greater than that of\n the input array.\n\n .. seealso:: :func:`numpy.expand_dims`\n\n \"\"\"\n shape = a.shape\n if axis < 0:\n axis = axis + len(shape) + 1\n return a.reshape(shape[:axis] + (1,) + shape[axis:])\n\n\ndef squeeze(a, axis=None):\n \"\"\"Removes size-one axes from the shape of an array.\n\n Args:\n a (cupy.ndarray): Array to be reshaped.\n axis (int or tuple of ints): Axes to be removed. This function removes\n all size-one axes by default. If one of the specified axes is not\n of size one, an exception is raised.\n\n Returns:\n cupy.ndarray: An array without (specified) size-one axes.\n\n .. seealso:: :func:`numpy.squeeze`\n\n \"\"\"\n if axis is None:\n axis = tuple(i for i, n in enumerate(a._shape) if n == 1)\n elif isinstance(axis, int):\n axis = axis,\n\n new_shape = []\n new_strides = []\n j = 0\n for i, n in enumerate(a._shape):\n if j < len(axis) and i == axis[j]:\n if n != 1:\n raise RuntimeError('Cannot squeeze dimension of size > 1')\n j += 1\n else:\n new_shape.append(n)\n new_strides.append(a._strides[i])\n\n v = a.view()\n v._shape = tuple(new_shape)\n v._strides = tuple(new_strides)\n v._c_contiguous = -1\n v._f_contiguous = -1\n return v\n", "path": "cupy/manipulation/dims.py"}], "after_files": [{"content": "import six\n\nimport cupy\nfrom cupy import internal\n\n\nzip_longest = six.moves.zip_longest\nsix_zip = six.moves.zip\n\n\ndef atleast_1d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 1.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects. 
Only zero-dimensional array is affected.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_1d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_1d')\n if a.ndim == 0:\n a = a.reshape(1)\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\ndef atleast_2d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 2.\n\n If an input array has dimensions less than two, then this function inserts\n new axes at the head of dimensions to make it have two dimensions.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_2d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_2d')\n if a.ndim == 0:\n a = a.reshape(1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\ndef atleast_3d(*arys):\n \"\"\"Converts arrays to arrays with dimensions >= 3.\n\n If an input array has dimensions less than three, then this function\n inserts new axes to make it have three dimensions. The place of the new\n axes are following:\n\n - If its shape is ``()``, then the shape of output is ``(1, 1, 1)``.\n - If its shape is ``(N,)``, then the shape of output is ``(1, N, 1)``.\n - If its shape is ``(M, N)``, then the shape of output is ``(M, N, 1)``.\n - Otherwise, the output is the input array itself.\n\n Args:\n arys (tuple of arrays): Arrays to be converted. All arguments must be\n cupy.ndarray objects.\n\n Returns:\n If there are only one input, then it returns its converted version.\n Otherwise, it returns a list of converted arrays.\n\n .. seealso:: :func:`numpy.atleast_3d`\n\n \"\"\"\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_3d')\n if a.ndim == 0:\n a = a.reshape(1, 1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :, cupy.newaxis]\n elif a.ndim == 2:\n a = a[:, :, cupy.newaxis]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res\n\n\nclass broadcast(object):\n \"\"\"Object that performs broadcasting.\n\n CuPy actually uses this class to support broadcasting in various\n operations. Note that this class does not provide an iterator.\n\n Args:\n arrays (tuple of arrays): Arrays to be broadcasted.\n\n Attributes:\n shape (tuple of ints): The broadcasted shape.\n nd (int): Number of dimensions of the broadcasted shape.\n size (int): Total size of the broadcasted shape.\n values (list of arrays): The broadcasted arrays.\n\n .. 
seealso:: :class:`numpy.broadcast`\n\n \"\"\"\n\n def __init__(self, *arrays):\n ndarray = cupy.ndarray\n rev = slice(None, None, -1)\n shape_arr = [a._shape[rev] for a in arrays\n if isinstance(a, ndarray)]\n r_shape = [max(ss) for ss in zip_longest(*shape_arr, fillvalue=0)]\n\n self.shape = shape = tuple(r_shape[rev])\n self.size = size = internal.prod(shape)\n self.nd = ndim = len(shape)\n\n broadcasted = list(arrays)\n for i, a in enumerate(broadcasted):\n if not isinstance(a, ndarray):\n continue\n\n a_shape = a.shape\n if a_shape == shape:\n continue\n\n r_strides = [\n a_st if sh == a_sh else (0 if a_sh == 1 else None)\n for sh, a_sh, a_st\n in six_zip(r_shape, a._shape[rev], a._strides[rev])]\n\n if None in r_strides:\n raise ValueError('Broadcasting failed')\n\n offset = (0,) * (ndim - len(r_strides))\n\n broadcasted[i] = view = a.view()\n view._shape = shape\n view._strides = offset + tuple(r_strides[rev])\n view._size = size\n view._c_contiguous = -1\n view._f_contiguous = -1\n\n self.values = tuple(broadcasted)\n\n\ndef broadcast_arrays(*args):\n \"\"\"Broadcasts given arrays.\n\n Args:\n args (tuple of arrays): Arrays to broadcast for each other.\n\n Returns:\n list: A list of broadcasted arrays.\n\n .. seealso:: :func:`numpy.broadcast_arrays`\n\n \"\"\"\n return broadcast(*args).values\n\n\ndef expand_dims(a, axis):\n \"\"\"Expands given arrays.\n\n Args:\n a (cupy.ndarray): Array to be expanded.\n axis (int): Position where new axis is to be inserted.\n\n Returns:\n cupy.ndarray: The number of dimensions is one greater than that of\n the input array.\n\n .. seealso:: :func:`numpy.expand_dims`\n\n \"\"\"\n shape = a.shape\n if axis < 0:\n axis = axis + len(shape) + 1\n return a.reshape(shape[:axis] + (1,) + shape[axis:])\n\n\ndef squeeze(a, axis=None):\n \"\"\"Removes size-one axes from the shape of an array.\n\n Args:\n a (cupy.ndarray): Array to be reshaped.\n axis (int or tuple of ints): Axes to be removed. This function removes\n all size-one axes by default. If one of the specified axes is not\n of size one, an exception is raised.\n\n Returns:\n cupy.ndarray: An array without (specified) size-one axes.\n\n .. seealso:: :func:`numpy.squeeze`\n\n \"\"\"\n if axis is None:\n axis = tuple(i for i, n in enumerate(a._shape) if n == 1)\n elif isinstance(axis, int):\n axis = axis,\n\n new_shape = []\n new_strides = []\n j = 0\n for i, n in enumerate(a._shape):\n if j < len(axis) and i == axis[j]:\n if n != 1:\n raise RuntimeError('Cannot squeeze dimension of size > 1')\n j += 1\n else:\n new_shape.append(n)\n new_strides.append(a._strides[i])\n\n v = a.view()\n v._shape = tuple(new_shape)\n v._strides = tuple(new_strides)\n v._c_contiguous = -1\n v._f_contiguous = -1\n return v\n", "path": "cupy/manipulation/dims.py"}]} | 2,641 | 122 |
gh_patches_debug_31157 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3349 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider signet_jewelers is broken
During the global build at 2021-07-14-14-42-22, spider **signet_jewelers** failed with **2353 features** and **6 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/signet_jewelers.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/signet_jewelers.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/signet_jewelers.geojson))
--- END ISSUE ---
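Judging from the fix further down, the crash pattern behind the errors in that log is calling `.groups()` on a failed `re.search`, which returns `None` when a store page lacks the expected `storeInformation` blob. A minimal, self-contained sketch of the defensive pattern (using `re.DOTALL` in place of the spider's inline `(?s)` flag; the sample inputs are hypothetical):

```python
import re

PATTERN = re.compile(r"storeInformation\s=\s(.*)", re.DOTALL)

def extract_store_information(script: str):
    """Return the raw storeInformation blob, or None when it is absent."""
    match = PATTERN.search(script)
    if match is None:
        # An unguarded match.groups() here would fail with
        # AttributeError: 'NoneType' object has no attribute 'groups'
        return None
    return match.groups()[0].replace(";", "")

print(extract_store_information("var storeInformation = {'name': 'X'};"))
print(extract_store_information("<html>no store data here</html>"))
```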
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/signet_jewelers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 import re
4
5 import scrapy
6
7 from locations.items import GeojsonPointItem
8 from locations.hours import OpeningHours
9
10
11 class SignetJewelersSpider(scrapy.Spider):
12 name = "signet_jewelers"
13 allowed_domains = ['www.jared.com', 'www.kay.com', 'www.zales.com', 'www.pagoda.com', 'www.peoplesjewellers.com',
14 'www.ernestjones.co.uk', 'www.hsamuel.co.uk']
15 download_delay = 0.5 # limit the delay to avoid 403 errors
16
17 ca_prov = ['Alberta', 'British Columbia', 'Manitoba', 'New Brunswick', 'Newfoundland and Labrador',
18 'Nova Scotia', 'Ontario', 'Saskatchewan']
19
20 states = ["Alabama", "Alaska", "Arizona", "Arkansas", "California", "Colorado",
21 "Connecticut", "Delaware", "Florida", "Georgia", "Hawaii", "Idaho", "Illinois",
22 "Indiana", "Iowa", "Kansas", "Kentucky", "Louisiana", "Maine", "Maryland",
23 "Massachusetts", "Michigan", "Minnesota", "Mississippi", "Missouri", "Montana",
24 "Nebraska", "Nevada", "New Hampshire", "New Jersey", "New Mexico", "New York",
25 "North Carolina", "North Dakota", "Ohio", "Oklahoma", "Oregon", "Pennsylvania",
26 "Rhode Island", "South Carolina", "South Dakota", "Tennessee", "Texas", "Utah",
27 "Vermont", "Virginia", "Washington", "West Virginia", "Wisconsin", "Wyoming"
28 ]
29
30 def start_requests(self):
31 north_america_brands = ["jared", "kay", "zales", "pagoda", "peoplesjewellers"]
32
33 uk_urls = [
34 'https://www.hsamuel.co.uk/scripts/dist/store-locator/functionality/store-details.min.js?sprint-17_20190911.3',
35 'https://www.ernestjones.co.uk/scripts/store-locator/storeLocationDetails.js']
36
37 for url in uk_urls:
38 yield scrapy.Request(url=url, callback=self.parse_uk)
39
40 template = 'https://www.{brand}.com/store-finder/view-stores/{region}'
41
42 for brand in north_america_brands:
43 if brand == "peoplesjewellers":
44 for prov in SignetJewelersSpider.ca_prov:
45 url = template.format(brand=brand, region=prov)
46 yield scrapy.Request(url, callback=self.parse_cities)
47 else:
48 for state in SignetJewelersSpider.states:
49 url = template.format(brand=brand, region=state)
50 yield scrapy.Request(url, callback=self.parse_cities)
51
52 def parse_cities(self, response):
53 cities = response.xpath('//*[@class="viewstoreslist"]/a/@href').extract()
54 for i in cities:
55 yield scrapy.Request(response.urljoin(i), callback=self.parse)
56
57 def parse(self, response):
58 script = " ".join(response.xpath('//*[@id="js-store-details"]/div/script/text()').extract())
59 data = re.search(r'storeInformation\s=\s((?s).*)', script).groups()[0]
60 data = data.replace(";", '')
61 data = eval(data)
62
63 if data["region"] in SignetJewelersSpider.ca_prov:
64 country = 'CA'
65 else:
66 country = 'US'
67
68 properties = {
69 'ref': data["name"],
70 'name': data["displayName"],
71 'addr_full': data["line1"],
72 'city': data["town"],
73 'state': data["region"],
74 'postcode': data["postalCode"],
75 'country': country,
76 'lat': data["latitude"],
77 'lon': data["longitude"],
78 'phone': data["phone"],
79 'website': response.url,
80 'brand': re.search(r'www.(\w+)', response.url)[1],
81 }
82
83 yield GeojsonPointItem(**properties)
84
85 def parse_uk(self, response):
86 data = re.search(r'Signet.allStoreDetails=((?s).*)', response.text)[1]
87 data = data.replace(';', '')
88 data = json.loads(data)
89
90 for store in data:
91 properties = {
92 'ref': store["number"],
93 'name': store["name"],
94 'addr_full': store["addressLine1"],
95 'city': store["town"],
96 'postcode': store["postcode"],
97 'country': 'GB',
98 'lat': store["latitude"],
99 'lon': store["longitude"],
100 'brand': re.search(r'www.(\w+)', response.url)[1],
101 }
102
103 yield GeojsonPointItem(**properties)
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/signet_jewelers.py b/locations/spiders/signet_jewelers.py
--- a/locations/spiders/signet_jewelers.py
+++ b/locations/spiders/signet_jewelers.py
@@ -56,29 +56,38 @@
def parse(self, response):
script = " ".join(response.xpath('//*[@id="js-store-details"]/div/script/text()').extract())
- data = re.search(r'storeInformation\s=\s((?s).*)', script).groups()[0]
- data = data.replace(";", '')
- data = eval(data)
-
- if data["region"] in SignetJewelersSpider.ca_prov:
- country = 'CA'
- else:
- country = 'US'
-
- properties = {
- 'ref': data["name"],
- 'name': data["displayName"],
- 'addr_full': data["line1"],
- 'city': data["town"],
- 'state': data["region"],
- 'postcode': data["postalCode"],
- 'country': country,
- 'lat': data["latitude"],
- 'lon': data["longitude"],
- 'phone': data["phone"],
- 'website': response.url,
- 'brand': re.search(r'www.(\w+)', response.url)[1],
- }
+ data = None
+
+ if re.search(r'storeInformation\s=\s((?s).*)', script) is not None:
+ data = re.search(r'storeInformation\s=\s((?s).*)', script).groups()
+
+ properties = {}
+
+ if data is not None:
+ if len(data) > 0:
+ data = data[0]
+ data = data.replace(";", '')
+ data = eval(data)
+
+ if data["region"] in SignetJewelersSpider.ca_prov:
+ country = 'CA'
+ else:
+ country = 'US'
+
+ properties = {
+ 'ref': data["name"],
+ 'name': data["displayName"],
+ 'addr_full': data["line1"],
+ 'city': data["town"],
+ 'state': data["region"],
+ 'postcode': data["postalCode"],
+ 'country': country,
+ 'lat': data["latitude"],
+ 'lon': data["longitude"],
+ 'phone': data["phone"],
+ 'website': response.url,
+ 'brand': re.search(r'www.(\w+)', response.url)[1],
+ }
yield GeojsonPointItem(**properties)
| {"golden_diff": "diff --git a/locations/spiders/signet_jewelers.py b/locations/spiders/signet_jewelers.py\n--- a/locations/spiders/signet_jewelers.py\n+++ b/locations/spiders/signet_jewelers.py\n@@ -56,29 +56,38 @@\n \n def parse(self, response):\n script = \" \".join(response.xpath('//*[@id=\"js-store-details\"]/div/script/text()').extract())\n- data = re.search(r'storeInformation\\s=\\s((?s).*)', script).groups()[0]\n- data = data.replace(\";\", '')\n- data = eval(data)\n-\n- if data[\"region\"] in SignetJewelersSpider.ca_prov:\n- country = 'CA'\n- else:\n- country = 'US'\n-\n- properties = {\n- 'ref': data[\"name\"],\n- 'name': data[\"displayName\"],\n- 'addr_full': data[\"line1\"],\n- 'city': data[\"town\"],\n- 'state': data[\"region\"],\n- 'postcode': data[\"postalCode\"],\n- 'country': country,\n- 'lat': data[\"latitude\"],\n- 'lon': data[\"longitude\"],\n- 'phone': data[\"phone\"],\n- 'website': response.url,\n- 'brand': re.search(r'www.(\\w+)', response.url)[1],\n- }\n+ data = None\n+\n+ if re.search(r'storeInformation\\s=\\s((?s).*)', script) is not None:\n+ data = re.search(r'storeInformation\\s=\\s((?s).*)', script).groups()\n+\n+ properties = {}\n+\n+ if data is not None:\n+ if len(data) > 0:\n+ data = data[0]\n+ data = data.replace(\";\", '')\n+ data = eval(data)\n+\n+ if data[\"region\"] in SignetJewelersSpider.ca_prov:\n+ country = 'CA'\n+ else:\n+ country = 'US'\n+\n+ properties = {\n+ 'ref': data[\"name\"],\n+ 'name': data[\"displayName\"],\n+ 'addr_full': data[\"line1\"],\n+ 'city': data[\"town\"],\n+ 'state': data[\"region\"],\n+ 'postcode': data[\"postalCode\"],\n+ 'country': country,\n+ 'lat': data[\"latitude\"],\n+ 'lon': data[\"longitude\"],\n+ 'phone': data[\"phone\"],\n+ 'website': response.url,\n+ 'brand': re.search(r'www.(\\w+)', response.url)[1],\n+ }\n \n yield GeojsonPointItem(**properties)\n", "issue": "Spider signet_jewelers is broken\nDuring the global build at 2021-07-14-14-42-22, spider **signet_jewelers** failed with **2353 features** and **6 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/logs/signet_jewelers.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/signet_jewelers.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-07-14-14-42-22/output/signet_jewelers.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass SignetJewelersSpider(scrapy.Spider):\n name = \"signet_jewelers\"\n allowed_domains = ['www.jared.com', 'www.kay.com', 'www.zales.com', 'www.pagoda.com', 'www.peoplesjewellers.com',\n 'www.ernestjones.co.uk', 'www.hsamuel.co.uk']\n download_delay = 0.5 # limit the delay to avoid 403 errors\n\n ca_prov = ['Alberta', 'British Columbia', 'Manitoba', 'New Brunswick', 'Newfoundland and Labrador',\n 'Nova Scotia', 'Ontario', 'Saskatchewan']\n\n states = [\"Alabama\", \"Alaska\", \"Arizona\", \"Arkansas\", \"California\", \"Colorado\",\n \"Connecticut\", \"Delaware\", \"Florida\", \"Georgia\", \"Hawaii\", \"Idaho\", \"Illinois\",\n \"Indiana\", \"Iowa\", \"Kansas\", \"Kentucky\", \"Louisiana\", \"Maine\", \"Maryland\",\n \"Massachusetts\", \"Michigan\", \"Minnesota\", \"Mississippi\", \"Missouri\", \"Montana\",\n \"Nebraska\", \"Nevada\", \"New Hampshire\", \"New Jersey\", \"New Mexico\", \"New York\",\n \"North Carolina\", \"North Dakota\", \"Ohio\", \"Oklahoma\", 
\"Oregon\", \"Pennsylvania\",\n \"Rhode Island\", \"South Carolina\", \"South Dakota\", \"Tennessee\", \"Texas\", \"Utah\",\n \"Vermont\", \"Virginia\", \"Washington\", \"West Virginia\", \"Wisconsin\", \"Wyoming\"\n ]\n\n def start_requests(self):\n north_america_brands = [\"jared\", \"kay\", \"zales\", \"pagoda\", \"peoplesjewellers\"]\n\n uk_urls = [\n 'https://www.hsamuel.co.uk/scripts/dist/store-locator/functionality/store-details.min.js?sprint-17_20190911.3',\n 'https://www.ernestjones.co.uk/scripts/store-locator/storeLocationDetails.js']\n\n for url in uk_urls:\n yield scrapy.Request(url=url, callback=self.parse_uk)\n\n template = 'https://www.{brand}.com/store-finder/view-stores/{region}'\n\n for brand in north_america_brands:\n if brand == \"peoplesjewellers\":\n for prov in SignetJewelersSpider.ca_prov:\n url = template.format(brand=brand, region=prov)\n yield scrapy.Request(url, callback=self.parse_cities)\n else:\n for state in SignetJewelersSpider.states:\n url = template.format(brand=brand, region=state)\n yield scrapy.Request(url, callback=self.parse_cities)\n\n def parse_cities(self, response):\n cities = response.xpath('//*[@class=\"viewstoreslist\"]/a/@href').extract()\n for i in cities:\n yield scrapy.Request(response.urljoin(i), callback=self.parse)\n\n def parse(self, response):\n script = \" \".join(response.xpath('//*[@id=\"js-store-details\"]/div/script/text()').extract())\n data = re.search(r'storeInformation\\s=\\s((?s).*)', script).groups()[0]\n data = data.replace(\";\", '')\n data = eval(data)\n\n if data[\"region\"] in SignetJewelersSpider.ca_prov:\n country = 'CA'\n else:\n country = 'US'\n\n properties = {\n 'ref': data[\"name\"],\n 'name': data[\"displayName\"],\n 'addr_full': data[\"line1\"],\n 'city': data[\"town\"],\n 'state': data[\"region\"],\n 'postcode': data[\"postalCode\"],\n 'country': country,\n 'lat': data[\"latitude\"],\n 'lon': data[\"longitude\"],\n 'phone': data[\"phone\"],\n 'website': response.url,\n 'brand': re.search(r'www.(\\w+)', response.url)[1],\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_uk(self, response):\n data = re.search(r'Signet.allStoreDetails=((?s).*)', response.text)[1]\n data = data.replace(';', '')\n data = json.loads(data)\n\n for store in data:\n properties = {\n 'ref': store[\"number\"],\n 'name': store[\"name\"],\n 'addr_full': store[\"addressLine1\"],\n 'city': store[\"town\"],\n 'postcode': store[\"postcode\"],\n 'country': 'GB',\n 'lat': store[\"latitude\"],\n 'lon': store[\"longitude\"],\n 'brand': re.search(r'www.(\\w+)', response.url)[1],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/signet_jewelers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass SignetJewelersSpider(scrapy.Spider):\n name = \"signet_jewelers\"\n allowed_domains = ['www.jared.com', 'www.kay.com', 'www.zales.com', 'www.pagoda.com', 'www.peoplesjewellers.com',\n 'www.ernestjones.co.uk', 'www.hsamuel.co.uk']\n download_delay = 0.5 # limit the delay to avoid 403 errors\n\n ca_prov = ['Alberta', 'British Columbia', 'Manitoba', 'New Brunswick', 'Newfoundland and Labrador',\n 'Nova Scotia', 'Ontario', 'Saskatchewan']\n\n states = [\"Alabama\", \"Alaska\", \"Arizona\", \"Arkansas\", \"California\", \"Colorado\",\n \"Connecticut\", \"Delaware\", \"Florida\", \"Georgia\", \"Hawaii\", \"Idaho\", \"Illinois\",\n \"Indiana\", \"Iowa\", \"Kansas\", \"Kentucky\", 
\"Louisiana\", \"Maine\", \"Maryland\",\n \"Massachusetts\", \"Michigan\", \"Minnesota\", \"Mississippi\", \"Missouri\", \"Montana\",\n \"Nebraska\", \"Nevada\", \"New Hampshire\", \"New Jersey\", \"New Mexico\", \"New York\",\n \"North Carolina\", \"North Dakota\", \"Ohio\", \"Oklahoma\", \"Oregon\", \"Pennsylvania\",\n \"Rhode Island\", \"South Carolina\", \"South Dakota\", \"Tennessee\", \"Texas\", \"Utah\",\n \"Vermont\", \"Virginia\", \"Washington\", \"West Virginia\", \"Wisconsin\", \"Wyoming\"\n ]\n\n def start_requests(self):\n north_america_brands = [\"jared\", \"kay\", \"zales\", \"pagoda\", \"peoplesjewellers\"]\n\n uk_urls = [\n 'https://www.hsamuel.co.uk/scripts/dist/store-locator/functionality/store-details.min.js?sprint-17_20190911.3',\n 'https://www.ernestjones.co.uk/scripts/store-locator/storeLocationDetails.js']\n\n for url in uk_urls:\n yield scrapy.Request(url=url, callback=self.parse_uk)\n\n template = 'https://www.{brand}.com/store-finder/view-stores/{region}'\n\n for brand in north_america_brands:\n if brand == \"peoplesjewellers\":\n for prov in SignetJewelersSpider.ca_prov:\n url = template.format(brand=brand, region=prov)\n yield scrapy.Request(url, callback=self.parse_cities)\n else:\n for state in SignetJewelersSpider.states:\n url = template.format(brand=brand, region=state)\n yield scrapy.Request(url, callback=self.parse_cities)\n\n def parse_cities(self, response):\n cities = response.xpath('//*[@class=\"viewstoreslist\"]/a/@href').extract()\n for i in cities:\n yield scrapy.Request(response.urljoin(i), callback=self.parse)\n\n def parse(self, response):\n script = \" \".join(response.xpath('//*[@id=\"js-store-details\"]/div/script/text()').extract())\n data = None\n\n if re.search(r'storeInformation\\s=\\s((?s).*)', script) is not None:\n data = re.search(r'storeInformation\\s=\\s((?s).*)', script).groups()\n\n properties = {}\n\n if data is not None:\n if len(data) > 0:\n data = data[0]\n data = data.replace(\";\", '')\n data = eval(data)\n\n if data[\"region\"] in SignetJewelersSpider.ca_prov:\n country = 'CA'\n else:\n country = 'US'\n\n properties = {\n 'ref': data[\"name\"],\n 'name': data[\"displayName\"],\n 'addr_full': data[\"line1\"],\n 'city': data[\"town\"],\n 'state': data[\"region\"],\n 'postcode': data[\"postalCode\"],\n 'country': country,\n 'lat': data[\"latitude\"],\n 'lon': data[\"longitude\"],\n 'phone': data[\"phone\"],\n 'website': response.url,\n 'brand': re.search(r'www.(\\w+)', response.url)[1],\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_uk(self, response):\n data = re.search(r'Signet.allStoreDetails=((?s).*)', response.text)[1]\n data = data.replace(';', '')\n data = json.loads(data)\n\n for store in data:\n properties = {\n 'ref': store[\"number\"],\n 'name': store[\"name\"],\n 'addr_full': store[\"addressLine1\"],\n 'city': store[\"town\"],\n 'postcode': store[\"postcode\"],\n 'country': 'GB',\n 'lat': store[\"latitude\"],\n 'lon': store[\"longitude\"],\n 'brand': re.search(r'www.(\\w+)', response.url)[1],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/signet_jewelers.py"}]} | 1,731 | 587 |
gh_patches_debug_37852 | rasdani/github-patches | git_diff | akvo__akvo-rsr-5268 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature Request: Document the results framework
### What are you trying to do?
Understand how the results framework functions
### Describe the solution you'd like
A technical documentation of how it works.
### Have you consider alternatives?
_No response_
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/models/project_hierarchy.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.db import models
8 from django.utils.translation import ugettext_lazy as _
9
10
11 class ProjectHierarchy(models.Model):
12 project_relation = 'projecthierarchy__in'
13 root_project = models.OneToOneField('Project', on_delete=models.CASCADE, db_index=True)
14 max_depth = models.PositiveSmallIntegerField()
15 is_master = models.BooleanField(_('is master program'), default=False)
16
17 class Meta:
18 app_label = 'rsr'
19 verbose_name = _('program')
20 verbose_name_plural = _('programs')
21 ordering = ['-id']
22
23 @property
24 def descendants(self):
25 return self.root_project.descendants(max_depth=self.max_depth)
26
27 @property
28 def project_count(self):
29 return self.descendants.count() - 1 # remove root_project from count
30
31 @property
32 def project_ids(self):
33 return self.descendants.values_list('id', flat=True)
34
35 @property
36 def organisation(self):
37 return self.root_project.reporting_org
38
39 def __str__(self):
40 return self.root_project.title
41
```
Path: `doc/conf.py`
Content:
```
1 # Configuration file for the Sphinx documentation builder.
2 #
3 # For the full list of built-in configuration values, see the documentation:
4 # https://www.sphinx-doc.org/en/master/usage/configuration.html
5
6 # -- Project information -----------------------------------------------------
7 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
8
9 import os
10 import sys
11 import django
12 sys.path.insert(0, os.path.abspath('..'))
13 os.environ['DJANGO_SETTINGS_MODULE'] = 'akvo.settings'
14 django.setup()
15
16 project = 'Akvo RSR'
17 copyright = '2023, Akvo Foundation'
18 author = 'Akvo Foundation'
19
20 # -- General configuration ---------------------------------------------------
21 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
22
23 extensions = [
24 'sphinx.ext.autodoc',
25 'sphinx.ext.autosummary',
26 'sphinx.ext.viewcode',
27 'myst_parser',
28 ]
29
30 templates_path = ['_templates']
31 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
32
33
34
35 # -- Options for HTML output -------------------------------------------------
36 # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
37
38 html_theme = 'cloud'
39 html_static_path = ['_static']
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rsr/models/project_hierarchy.py b/akvo/rsr/models/project_hierarchy.py
--- a/akvo/rsr/models/project_hierarchy.py
+++ b/akvo/rsr/models/project_hierarchy.py
@@ -9,10 +9,22 @@
class ProjectHierarchy(models.Model):
+ """
+ The actual "Program" with a project hierarchy.
+ """
project_relation = 'projecthierarchy__in'
+
root_project = models.OneToOneField('Project', on_delete=models.CASCADE, db_index=True)
+ """
+ The root of the program
+ It can be used to create subprograms / a program tree
+ """
+
max_depth = models.PositiveSmallIntegerField()
+ """TODO: It is unclear why this field currently exists"""
+
is_master = models.BooleanField(_('is master program'), default=False)
+ """Used when an organisation has one program under which they would like to create subprograms"""
class Meta:
app_label = 'rsr'
@@ -22,10 +34,15 @@
@property
def descendants(self):
+ """
+ The entire tree in a list.
+ No order is guaranteed
+ """
return self.root_project.descendants(max_depth=self.max_depth)
@property
def project_count(self):
+ """The number of children without counting the root project"""
return self.descendants.count() - 1 # remove root_project from count
@property
@@ -34,6 +51,7 @@
@property
def organisation(self):
+ """The reporting organisation of the tree"""
return self.root_project.reporting_org
def __str__(self):
diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -21,6 +21,7 @@
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
+ 'sphinxcontrib.plantuml',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
@@ -30,7 +31,9 @@
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
-
+myst_enable_extensions = [
+ "colon_fence", # https://myst-parser.readthedocs.io/en/latest/syntax/optional.html#syntax-colon-fence
+]
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
| {"golden_diff": "diff --git a/akvo/rsr/models/project_hierarchy.py b/akvo/rsr/models/project_hierarchy.py\n--- a/akvo/rsr/models/project_hierarchy.py\n+++ b/akvo/rsr/models/project_hierarchy.py\n@@ -9,10 +9,22 @@\n \n \n class ProjectHierarchy(models.Model):\n+ \"\"\"\n+ The actual \"Program\" with a project hierarchy.\n+ \"\"\"\n project_relation = 'projecthierarchy__in'\n+\n root_project = models.OneToOneField('Project', on_delete=models.CASCADE, db_index=True)\n+ \"\"\"\n+ The root of the program\n+ It can be used to create subprograms / a program tree\n+ \"\"\"\n+\n max_depth = models.PositiveSmallIntegerField()\n+ \"\"\"TODO: It is unclear why this field currently exists\"\"\"\n+\n is_master = models.BooleanField(_('is master program'), default=False)\n+ \"\"\"Used when an organisation has one program under which they would like to create subprograms\"\"\"\n \n class Meta:\n app_label = 'rsr'\n@@ -22,10 +34,15 @@\n \n @property\n def descendants(self):\n+ \"\"\"\n+ The entire tree in a list.\n+ No order is guaranteed\n+ \"\"\"\n return self.root_project.descendants(max_depth=self.max_depth)\n \n @property\n def project_count(self):\n+ \"\"\"The number of children without counting the root project\"\"\"\n return self.descendants.count() - 1 # remove root_project from count\n \n @property\n@@ -34,6 +51,7 @@\n \n @property\n def organisation(self):\n+ \"\"\"The reporting organisation of the tree\"\"\"\n return self.root_project.reporting_org\n \n def __str__(self):\ndiff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -21,6 +21,7 @@\n # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n \n extensions = [\n+ 'sphinxcontrib.plantuml',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode',\n@@ -30,7 +31,9 @@\n templates_path = ['_templates']\n exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n \n-\n+myst_enable_extensions = [\n+ \"colon_fence\", # https://myst-parser.readthedocs.io/en/latest/syntax/optional.html#syntax-colon-fence\n+]\n \n # -- Options for HTML output -------------------------------------------------\n # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n", "issue": "Feature Request: Document the results framework\n### What are you trying to do?\n\nUnderstand how the results framework functions\n\n### Describe the solution you'd like\n\nA technical documentation of how it works.\n\n### Have you consider alternatives?\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ProjectHierarchy(models.Model):\n project_relation = 'projecthierarchy__in'\n root_project = models.OneToOneField('Project', on_delete=models.CASCADE, db_index=True)\n max_depth = models.PositiveSmallIntegerField()\n is_master = models.BooleanField(_('is master program'), default=False)\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _('program')\n verbose_name_plural = _('programs')\n ordering = ['-id']\n\n @property\n def descendants(self):\n return self.root_project.descendants(max_depth=self.max_depth)\n\n @property\n def project_count(self):\n return 
self.descendants.count() - 1 # remove root_project from count\n\n @property\n def project_ids(self):\n return self.descendants.values_list('id', flat=True)\n\n @property\n def organisation(self):\n return self.root_project.reporting_org\n\n def __str__(self):\n return self.root_project.title\n", "path": "akvo/rsr/models/project_hierarchy.py"}, {"content": "# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nimport os\nimport sys\nimport django\nsys.path.insert(0, os.path.abspath('..'))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'akvo.settings'\ndjango.setup()\n\nproject = 'Akvo RSR'\ncopyright = '2023, Akvo Foundation'\nauthor = 'Akvo Foundation'\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode',\n 'myst_parser',\n]\n\ntemplates_path = ['_templates']\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = 'cloud'\nhtml_static_path = ['_static']\n", "path": "doc/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ProjectHierarchy(models.Model):\n \"\"\"\n The actual \"Program\" with a project hierarchy.\n \"\"\"\n project_relation = 'projecthierarchy__in'\n\n root_project = models.OneToOneField('Project', on_delete=models.CASCADE, db_index=True)\n \"\"\"\n The root of the program\n It can be used to create subprograms / a program tree\n \"\"\"\n\n max_depth = models.PositiveSmallIntegerField()\n \"\"\"TODO: It is unclear why this field currently exists\"\"\"\n\n is_master = models.BooleanField(_('is master program'), default=False)\n \"\"\"Used when an organisation has one program under which they would like to create subprograms\"\"\"\n\n class Meta:\n app_label = 'rsr'\n verbose_name = _('program')\n verbose_name_plural = _('programs')\n ordering = ['-id']\n\n @property\n def descendants(self):\n \"\"\"\n The entire tree in a list.\n No order is guaranteed\n \"\"\"\n return self.root_project.descendants(max_depth=self.max_depth)\n\n @property\n def project_count(self):\n \"\"\"The number of children without counting the root project\"\"\"\n return self.descendants.count() - 1 # remove root_project from count\n\n @property\n def project_ids(self):\n return self.descendants.values_list('id', flat=True)\n\n @property\n def organisation(self):\n \"\"\"The reporting organisation of the tree\"\"\"\n return self.root_project.reporting_org\n\n def __str__(self):\n return self.root_project.title\n", "path": "akvo/rsr/models/project_hierarchy.py"}, {"content": "# Configuration file for the Sphinx documentation 
builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nimport os\nimport sys\nimport django\nsys.path.insert(0, os.path.abspath('..'))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'akvo.settings'\ndjango.setup()\n\nproject = 'Akvo RSR'\ncopyright = '2023, Akvo Foundation'\nauthor = 'Akvo Foundation'\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\n\nextensions = [\n 'sphinxcontrib.plantuml',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.viewcode',\n 'myst_parser',\n]\n\ntemplates_path = ['_templates']\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\nmyst_enable_extensions = [\n \"colon_fence\", # https://myst-parser.readthedocs.io/en/latest/syntax/optional.html#syntax-colon-fence\n]\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = 'cloud'\nhtml_static_path = ['_static']\n", "path": "doc/conf.py"}]} | 1,031 | 569 |
gh_patches_debug_32277 | rasdani/github-patches | git_diff | weni-ai__bothub-engine-78 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
It is possible to translate an example to the same language
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bothub/api/serializers/translate.py`
Content:
```
1 from rest_framework import serializers
2
3 from django.utils.translation import gettext as _
4
5 from bothub.common.models import RepositoryTranslatedExampleEntity
6 from bothub.common.models import RepositoryTranslatedExample
7 from bothub.common.models import RepositoryExample
8
9 from ..validators import CanContributeInRepositoryTranslatedExampleValidator
10 from ..validators import CanContributeInRepositoryExampleValidator
11 from ..validators import TranslatedExampleEntitiesValidator
12
13
14 class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):
15 class Meta:
16 model = RepositoryTranslatedExampleEntity
17 fields = [
18 'id',
19 'repository_translated_example',
20 'start',
21 'end',
22 'entity',
23 'created_at',
24 'value',
25 ]
26
27 repository_translated_example = serializers.PrimaryKeyRelatedField(
28 queryset=RepositoryTranslatedExample.objects,
29 validators=[
30 CanContributeInRepositoryTranslatedExampleValidator(),
31 ],
32 help_text='Example translation ID')
33 value = serializers.SerializerMethodField()
34
35 def get_value(self, obj):
36 return obj.value
37
38
39 class RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):
40 class Meta:
41 model = RepositoryTranslatedExample
42 fields = [
43 'id',
44 'original_example',
45 'from_language',
46 'language',
47 'text',
48 'has_valid_entities',
49 'entities',
50 'created_at',
51 ]
52
53 original_example = serializers.PrimaryKeyRelatedField(
54 queryset=RepositoryExample.objects,
55 validators=[
56 CanContributeInRepositoryExampleValidator(),
57 ],
58 help_text=_('Example\'s ID'))
59 from_language = serializers.SerializerMethodField()
60 has_valid_entities = serializers.SerializerMethodField()
61 entities = RepositoryTranslatedExampleEntitySeralizer(
62 many=True,
63 read_only=True)
64
65 def get_from_language(self, obj):
66 return obj.original_example.repository_update.language
67
68 def get_has_valid_entities(self, obj):
69 return obj.has_valid_entities
70
71
72 class NewRepositoryTranslatedExampleEntitySeralizer(
73 serializers.ModelSerializer):
74 class Meta:
75 model = RepositoryTranslatedExampleEntity
76 fields = [
77 'start',
78 'end',
79 'entity',
80 ]
81
82
83 class NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):
84 class Meta:
85 model = RepositoryTranslatedExample
86 fields = [
87 'id',
88 'original_example',
89 'language',
90 'text',
91 'has_valid_entities',
92 'entities',
93 ]
94
95 def __init__(self, *args, **kwargs):
96 super().__init__(*args, **kwargs)
97 self.validators.append(TranslatedExampleEntitiesValidator())
98
99 original_example = serializers.PrimaryKeyRelatedField(
100 queryset=RepositoryExample.objects,
101 validators=[
102 CanContributeInRepositoryExampleValidator(),
103 ],
104 help_text=_('Example\'s ID'))
105 has_valid_entities = serializers.SerializerMethodField()
106 entities = NewRepositoryTranslatedExampleEntitySeralizer(
107 many=True,
108 style={'text_field': 'text'})
109
110 def get_has_valid_entities(self, obj):
111 return obj.has_valid_entities
112
113 def create(self, validated_data):
114 entities_data = validated_data.pop('entities')
115
116 translated = self.Meta.model.objects.create(**validated_data)
117 for entity_data in entities_data:
118 RepositoryTranslatedExampleEntity.objects.create(
119 repository_translated_example=translated,
120 **entity_data)
121 return translated
122
```
Path: `bothub/api/validators.py`
Content:
```
1 from django.utils.translation import gettext as _
2 from rest_framework.exceptions import PermissionDenied
3 from rest_framework.exceptions import ValidationError
4
5 from bothub.common.models import RepositoryTranslatedExample
6
7
8 class CanContributeInRepositoryValidator(object):
9 def __call__(self, value):
10 user_authorization = value.get_user_authorization(
11 self.request.user)
12 if not user_authorization.can_contribute:
13 raise PermissionDenied(
14 _('You can\'t contribute in this repository'))
15
16 def set_context(self, serializer):
17 self.request = serializer.context.get('request')
18
19
20 class CanContributeInRepositoryExampleValidator(object):
21 def __call__(self, value):
22 repository = value.repository_update.repository
23 user_authorization = repository.get_user_authorization(
24 self.request.user)
25 if not user_authorization.can_contribute:
26 raise PermissionDenied(
27 _('You can\'t contribute in this repository'))
28
29 def set_context(self, serializer):
30 self.request = serializer.context.get('request')
31
32
33 class CanContributeInRepositoryTranslatedExampleValidator(object):
34 def __call__(self, value):
35 repository = value.original_example.repository_update.repository
36 user_authorization = repository.get_user_authorization(
37 self.request.user)
38 if not user_authorization.can_contribute:
39 raise PermissionDenied(
40 _('You can\'t contribute in this repository'))
41
42 def set_context(self, serializer):
43 self.request = serializer.context.get('request')
44
45
46 class TranslatedExampleEntitiesValidator(object):
47 def __call__(self, attrs):
48 original_example = attrs.get('original_example')
49 entities_valid = RepositoryTranslatedExample.same_entities_validator(
50 list(map(lambda x: dict(x), attrs.get('entities'))),
51 list(map(lambda x: x.to_dict, original_example.entities.all())))
52 if not entities_valid:
53 raise ValidationError({'entities': _('Invalid entities')})
54
```
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 with open('requirements.txt') as fp:
4 install_requires = fp.read()
5 install_requires = list(
6 filter(lambda x: len(x) > 0, install_requires.split('\n')))
7
8 setup(
9 name='bothub',
10 version='1.7.2',
11 description='bothub',
12 packages=find_packages(),
13 install_requires=install_requires,
14 python_requires='>=3.6',
15 )
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py
--- a/bothub/api/serializers/translate.py
+++ b/bothub/api/serializers/translate.py
@@ -9,6 +9,7 @@
from ..validators import CanContributeInRepositoryTranslatedExampleValidator
from ..validators import CanContributeInRepositoryExampleValidator
from ..validators import TranslatedExampleEntitiesValidator
+from ..validators import TranslatedExampleLanguageValidator
class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):
@@ -95,6 +96,7 @@
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.validators.append(TranslatedExampleEntitiesValidator())
+ self.validators.append(TranslatedExampleLanguageValidator())
original_example = serializers.PrimaryKeyRelatedField(
queryset=RepositoryExample.objects,
diff --git a/bothub/api/validators.py b/bothub/api/validators.py
--- a/bothub/api/validators.py
+++ b/bothub/api/validators.py
@@ -51,3 +51,11 @@
list(map(lambda x: x.to_dict, original_example.entities.all())))
if not entities_valid:
raise ValidationError({'entities': _('Invalid entities')})
+
+
+class TranslatedExampleLanguageValidator(object):
+ def __call__(self, attrs):
+ original_example = attrs.get('original_example')
+ language = attrs.get('language')
+ if original_example.repository_update.language == language:
+ raise ValidationError({'language': _('Can\'t translate to same language')})
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@
setup(
name='bothub',
- version='1.7.2',
+ version='1.7.3',
description='bothub',
packages=find_packages(),
install_requires=install_requires,
| {"golden_diff": "diff --git a/bothub/api/serializers/translate.py b/bothub/api/serializers/translate.py\n--- a/bothub/api/serializers/translate.py\n+++ b/bothub/api/serializers/translate.py\n@@ -9,6 +9,7 @@\n from ..validators import CanContributeInRepositoryTranslatedExampleValidator\n from ..validators import CanContributeInRepositoryExampleValidator\n from ..validators import TranslatedExampleEntitiesValidator\n+from ..validators import TranslatedExampleLanguageValidator\n \n \n class RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n@@ -95,6 +96,7 @@\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n+ self.validators.append(TranslatedExampleLanguageValidator())\n \n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\ndiff --git a/bothub/api/validators.py b/bothub/api/validators.py\n--- a/bothub/api/validators.py\n+++ b/bothub/api/validators.py\n@@ -51,3 +51,11 @@\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n+\n+\n+class TranslatedExampleLanguageValidator(object):\n+ def __call__(self, attrs):\n+ original_example = attrs.get('original_example')\n+ language = attrs.get('language')\n+ if original_example.repository_update.language == language:\n+ raise ValidationError({'language': _('Can\\'t translate to same language')})\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -7,7 +7,7 @@\n \n setup(\n name='bothub',\n- version='1.7.2',\n+ version='1.7.3',\n description='bothub',\n packages=find_packages(),\n install_requires=install_requires,\n", "issue": "Is possible translate example to same language\n\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom django.utils.translation import gettext as _\n\nfrom bothub.common.models import RepositoryTranslatedExampleEntity\nfrom bothub.common.models import RepositoryTranslatedExample\nfrom bothub.common.models import RepositoryExample\n\nfrom ..validators import CanContributeInRepositoryTranslatedExampleValidator\nfrom ..validators import CanContributeInRepositoryExampleValidator\nfrom ..validators import TranslatedExampleEntitiesValidator\n\n\nclass RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'id',\n 'repository_translated_example',\n 'start',\n 'end',\n 'entity',\n 'created_at',\n 'value',\n ]\n\n repository_translated_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryTranslatedExample.objects,\n validators=[\n CanContributeInRepositoryTranslatedExampleValidator(),\n ],\n help_text='Example translation ID')\n value = serializers.SerializerMethodField()\n\n def get_value(self, obj):\n return obj.value\n\n\nclass RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'from_language',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n 'created_at',\n ]\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n from_language = serializers.SerializerMethodField()\n has_valid_entities = serializers.SerializerMethodField()\n entities = 
RepositoryTranslatedExampleEntitySeralizer(\n many=True,\n read_only=True)\n\n def get_from_language(self, obj):\n return obj.original_example.repository_update.language\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n\nclass NewRepositoryTranslatedExampleEntitySeralizer(\n serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'start',\n 'end',\n 'entity',\n ]\n\n\nclass NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n has_valid_entities = serializers.SerializerMethodField()\n entities = NewRepositoryTranslatedExampleEntitySeralizer(\n many=True,\n style={'text_field': 'text'})\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n def create(self, validated_data):\n entities_data = validated_data.pop('entities')\n\n translated = self.Meta.model.objects.create(**validated_data)\n for entity_data in entities_data:\n RepositoryTranslatedExampleEntity.objects.create(\n repository_translated_example=translated,\n **entity_data)\n return translated\n", "path": "bothub/api/serializers/translate.py"}, {"content": "from django.utils.translation import gettext as _\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\n\nfrom bothub.common.models import RepositoryTranslatedExample\n\n\nclass CanContributeInRepositoryValidator(object):\n def __call__(self, value):\n user_authorization = value.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryExampleValidator(object):\n def __call__(self, value):\n repository = value.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\n def __call__(self, value):\n repository = value.original_example.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass TranslatedExampleEntitiesValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\n list(map(lambda x: dict(x), attrs.get('entities'))),\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n", "path": 
"bothub/api/validators.py"}, {"content": "from setuptools import setup, find_packages\n\nwith open('requirements.txt') as fp:\n install_requires = fp.read()\ninstall_requires = list(\n filter(lambda x: len(x) > 0, install_requires.split('\\n')))\n\nsetup(\n name='bothub',\n version='1.7.2',\n description='bothub',\n packages=find_packages(),\n install_requires=install_requires,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom django.utils.translation import gettext as _\n\nfrom bothub.common.models import RepositoryTranslatedExampleEntity\nfrom bothub.common.models import RepositoryTranslatedExample\nfrom bothub.common.models import RepositoryExample\n\nfrom ..validators import CanContributeInRepositoryTranslatedExampleValidator\nfrom ..validators import CanContributeInRepositoryExampleValidator\nfrom ..validators import TranslatedExampleEntitiesValidator\nfrom ..validators import TranslatedExampleLanguageValidator\n\n\nclass RepositoryTranslatedExampleEntitySeralizer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'id',\n 'repository_translated_example',\n 'start',\n 'end',\n 'entity',\n 'created_at',\n 'value',\n ]\n\n repository_translated_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryTranslatedExample.objects,\n validators=[\n CanContributeInRepositoryTranslatedExampleValidator(),\n ],\n help_text='Example translation ID')\n value = serializers.SerializerMethodField()\n\n def get_value(self, obj):\n return obj.value\n\n\nclass RepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'from_language',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n 'created_at',\n ]\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n from_language = serializers.SerializerMethodField()\n has_valid_entities = serializers.SerializerMethodField()\n entities = RepositoryTranslatedExampleEntitySeralizer(\n many=True,\n read_only=True)\n\n def get_from_language(self, obj):\n return obj.original_example.repository_update.language\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n\nclass NewRepositoryTranslatedExampleEntitySeralizer(\n serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExampleEntity\n fields = [\n 'start',\n 'end',\n 'entity',\n ]\n\n\nclass NewRepositoryTranslatedExampleSerializer(serializers.ModelSerializer):\n class Meta:\n model = RepositoryTranslatedExample\n fields = [\n 'id',\n 'original_example',\n 'language',\n 'text',\n 'has_valid_entities',\n 'entities',\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.validators.append(TranslatedExampleEntitiesValidator())\n self.validators.append(TranslatedExampleLanguageValidator())\n\n original_example = serializers.PrimaryKeyRelatedField(\n queryset=RepositoryExample.objects,\n validators=[\n CanContributeInRepositoryExampleValidator(),\n ],\n help_text=_('Example\\'s ID'))\n has_valid_entities = serializers.SerializerMethodField()\n entities = NewRepositoryTranslatedExampleEntitySeralizer(\n many=True,\n style={'text_field': 'text'})\n\n def get_has_valid_entities(self, obj):\n return obj.has_valid_entities\n\n def create(self, 
validated_data):\n entities_data = validated_data.pop('entities')\n\n translated = self.Meta.model.objects.create(**validated_data)\n for entity_data in entities_data:\n RepositoryTranslatedExampleEntity.objects.create(\n repository_translated_example=translated,\n **entity_data)\n return translated\n", "path": "bothub/api/serializers/translate.py"}, {"content": "from django.utils.translation import gettext as _\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.exceptions import ValidationError\n\nfrom bothub.common.models import RepositoryTranslatedExample\n\n\nclass CanContributeInRepositoryValidator(object):\n def __call__(self, value):\n user_authorization = value.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryExampleValidator(object):\n def __call__(self, value):\n repository = value.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass CanContributeInRepositoryTranslatedExampleValidator(object):\n def __call__(self, value):\n repository = value.original_example.repository_update.repository\n user_authorization = repository.get_user_authorization(\n self.request.user)\n if not user_authorization.can_contribute:\n raise PermissionDenied(\n _('You can\\'t contribute in this repository'))\n\n def set_context(self, serializer):\n self.request = serializer.context.get('request')\n\n\nclass TranslatedExampleEntitiesValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n entities_valid = RepositoryTranslatedExample.same_entities_validator(\n list(map(lambda x: dict(x), attrs.get('entities'))),\n list(map(lambda x: x.to_dict, original_example.entities.all())))\n if not entities_valid:\n raise ValidationError({'entities': _('Invalid entities')})\n\n\nclass TranslatedExampleLanguageValidator(object):\n def __call__(self, attrs):\n original_example = attrs.get('original_example')\n language = attrs.get('language')\n if original_example.repository_update.language == language:\n raise ValidationError({'language': _('Can\\'t translate to same language')})\n", "path": "bothub/api/validators.py"}, {"content": "from setuptools import setup, find_packages\n\nwith open('requirements.txt') as fp:\n install_requires = fp.read()\ninstall_requires = list(\n filter(lambda x: len(x) > 0, install_requires.split('\\n')))\n\nsetup(\n name='bothub',\n version='1.7.3',\n description='bothub',\n packages=find_packages(),\n install_requires=install_requires,\n python_requires='>=3.6',\n)\n", "path": "setup.py"}]} | 1,843 | 424 |
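As a rough, self-contained illustration of the rule enforced by the golden diff in the record above — translating an example into its own source language is rejected — the sketch below uses stand-in names (`OriginalExample`, `validate_translation_language`, a local `ValidationError`) rather than Bothub's real models and validators:

```python
from dataclasses import dataclass


class ValidationError(Exception):
    """Stand-in for rest_framework.exceptions.ValidationError."""


@dataclass
class OriginalExample:
    # Stand-in for RepositoryExample; `language` mimics
    # original_example.repository_update.language in the patch.
    language: str


def validate_translation_language(original: OriginalExample, target_language: str) -> None:
    # Same condition as the TranslatedExampleLanguageValidator added in the diff.
    if original.language == target_language:
        raise ValidationError({"language": "Can't translate to same language"})


validate_translation_language(OriginalExample(language="en"), "pt")    # accepted
# validate_translation_language(OriginalExample(language="en"), "en")  # would raise
```

In the real patch the check runs as a serializer-level validator, so it sees both `original_example` and `language` in the same `attrs` dict.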
gh_patches_debug_36681 | rasdani/github-patches | git_diff | meltano__meltano-6069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix `KeyError: venv_name` and similar errors in lockfiles
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/core/plugin_lock_service.py`
Content:
```
1 """Plugin Lockfile Service."""
2
3 from __future__ import annotations
4
5 import json
6 from pathlib import Path
7
8 from structlog.stdlib import get_logger
9
10 from meltano.core.plugin.base import BasePlugin, PluginRef, StandalonePlugin, Variant
11 from meltano.core.project import Project
12
13 logger = get_logger(__name__)
14
15
16 class LockfileAlreadyExistsError(Exception):
17 """Raised when a plugin lockfile already exists."""
18
19 def __init__(self, message: str, path: Path, plugin: PluginRef):
20 """Create a new LockfileAlreadyExistsError.
21
22 Args:
23 message: The error message.
24 path: The path to the existing lockfile.
25 plugin: The plugin that was locked.
26 """
27 self.path = path
28 self.plugin = plugin
29 super().__init__(message)
30
31
32 class PluginLockService:
33 """Plugin Lockfile Service."""
34
35 def __init__(self, project: Project):
36 """Create a new Plugin Lockfile Service.
37
38 Args:
39 project: The Meltano project.
40 """
41 self.project = project
42
43 def save(
44 self,
45 plugin: BasePlugin,
46 *,
47 overwrite: bool = False,
48 exists_ok: bool = False,
49 ):
50 """Save the plugin lockfile.
51
52 Args:
53 plugin: The plugin definition to save.
54 overwrite: Whether to overwrite the lockfile if it already exists.
55 exists_ok: Whether raise an exception if the lockfile already exists.
56
57 Raises:
58 LockfileAlreadyExistsError: If the lockfile already exists and is not
59 flagged for overwriting.
60 """
61 variant = None if plugin.variant == Variant.DEFAULT_NAME else plugin.variant
62
63 logger.info(f"Locking a {type(plugin)}")
64
65 plugin_def = plugin.definition
66 path = self.project.plugin_lock_path(
67 plugin_def.type,
68 plugin_def.name,
69 variant_name=variant,
70 )
71
72 if path.exists() and not overwrite and not exists_ok:
73 raise LockfileAlreadyExistsError(
74 f"Lockfile already exists: {path}",
75 path,
76 plugin,
77 )
78
79 variant = plugin_def.find_variant(plugin.variant)
80 locked_def = StandalonePlugin.from_variant(
81 variant,
82 plugin.name,
83 plugin.namespace,
84 plugin.type,
85 label=plugin.label,
86 )
87
88 with path.open("w") as lockfile:
89 json.dump(locked_def.canonical(), lockfile, indent=2)
90
91 logger.debug("Locked plugin definition", path=path)
92
```
Path: `src/meltano/core/project_add_service.py`
Content:
```
1 """Add plugins to the project."""
2
3 from __future__ import annotations
4
5 import enum
6
7 from .plugin import BasePlugin, PluginType, Variant
8 from .plugin.project_plugin import ProjectPlugin
9 from .project import Project
10 from .project_plugins_service import PluginAlreadyAddedException, ProjectPluginsService
11
12
13 class PluginAddedReason(str, enum.Enum):
14 """The reason why a plugin was added to the project."""
15
16 #: The plugin was added by the user.
17 USER_REQUEST = "user_request"
18
19 #: The plugin was added because it is related to another plugin.
20 RELATED = "related"
21
22 #: The plugin was added because it is required by another plugin.
23 REQUIRED = "required"
24
25
26 class MissingPluginException(Exception):
27 """Raised when a plugin is not found."""
28
29
30 class ProjectAddService:
31 """Project Add Service."""
32
33 def __init__(
34 self,
35 project: Project,
36 plugins_service: ProjectPluginsService = None,
37 ):
38 """Create a new Project Add Service.
39
40 Args:
41 project: The project to add plugins to.
42 plugins_service: The project plugins service.
43 """
44 self.project = project
45 self.plugins_service = plugins_service or ProjectPluginsService(project)
46
47 def add(
48 self,
49 plugin_type: PluginType,
50 plugin_name: str,
51 lock: bool = True,
52 **attrs,
53 ) -> ProjectPlugin:
54 """Add a new plugin to the project.
55
56 Args:
57 plugin_type: The type of the plugin to add.
58 plugin_name (str): The name of the plugin to add.
59 lock: Whether to generate a lockfile for the plugin.
60 attrs: Additional attributes to add to the plugin.
61
62 Returns:
63 The added plugin.
64 """
65 plugin = ProjectPlugin(
66 plugin_type, plugin_name, **attrs, default_variant=Variant.DEFAULT_NAME
67 )
68
69 with self.plugins_service.disallow_discovery_yaml():
70 self.plugins_service.ensure_parent(plugin)
71
72 # If we are inheriting from a base plugin definition,
73 # repeat the variant and pip_url in meltano.yml
74 parent = plugin.parent
75 if isinstance(parent, BasePlugin):
76 plugin.variant = parent.variant
77 plugin.pip_url = parent.pip_url
78
79 added = self.add_plugin(plugin)
80
81 if lock and not added.is_custom():
82 self.plugins_service.lock_service.save(
83 added.parent,
84 exists_ok=plugin.inherit_from is not None,
85 )
86
87 return added
88
89 def add_plugin(self, plugin: ProjectPlugin):
90 """Add a plugin to the project.
91
92 Args:
93 plugin: The plugin to add.
94
95 Returns:
96 The added plugin.
97 """
98 return self.plugins_service.add_to_file(plugin)
99
100 def add_required(
101 self,
102 plugin: ProjectPlugin,
103 lock: bool = True,
104 ):
105 """Add all required plugins to the project.
106
107 Args:
108 plugin: The plugin to get requirements from.
109 lock: Whether to generate a lockfile for the plugin.
110
111 Returns:
112 The added plugins.
113 """
114 added_plugins = []
115 for plugin_type, plugins in plugin.all_requires.items():
116 for plugin_req in plugins:
117 try:
118 plugin = self.add(
119 plugin_type,
120 plugin_req.name,
121 variant=plugin_req.variant,
122 lock=lock,
123 )
124 except PluginAlreadyAddedException:
125 continue
126
127 added_plugins.append(plugin)
128
129 added_plugins_with_required = []
130 for added in added_plugins:
131 added_plugins_with_required.extend([added, *self.add_required(added)])
132
133 return added_plugins_with_required
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/meltano/core/plugin_lock_service.py b/src/meltano/core/plugin_lock_service.py
--- a/src/meltano/core/plugin_lock_service.py
+++ b/src/meltano/core/plugin_lock_service.py
@@ -7,7 +7,8 @@
from structlog.stdlib import get_logger
-from meltano.core.plugin.base import BasePlugin, PluginRef, StandalonePlugin, Variant
+from meltano.core.plugin.base import PluginRef, StandalonePlugin, Variant
+from meltano.core.plugin.project_plugin import ProjectPlugin
from meltano.core.project import Project
logger = get_logger(__name__)
@@ -42,7 +43,7 @@
def save(
self,
- plugin: BasePlugin,
+ plugin: ProjectPlugin,
*,
overwrite: bool = False,
exists_ok: bool = False,
@@ -58,11 +59,12 @@
LockfileAlreadyExistsError: If the lockfile already exists and is not
flagged for overwriting.
"""
- variant = None if plugin.variant == Variant.DEFAULT_NAME else plugin.variant
-
- logger.info(f"Locking a {type(plugin)}")
+ base_plugin = plugin.parent
+ variant = (
+ None if base_plugin.variant == Variant.DEFAULT_NAME else base_plugin.variant
+ )
- plugin_def = plugin.definition
+ plugin_def = base_plugin.definition
path = self.project.plugin_lock_path(
plugin_def.type,
plugin_def.name,
@@ -76,13 +78,13 @@
plugin,
)
- variant = plugin_def.find_variant(plugin.variant)
+ variant = plugin_def.find_variant(base_plugin.variant)
locked_def = StandalonePlugin.from_variant(
variant,
- plugin.name,
- plugin.namespace,
- plugin.type,
- label=plugin.label,
+ base_plugin.name,
+ base_plugin.namespace,
+ base_plugin.type,
+ label=base_plugin.label,
)
with path.open("w") as lockfile:
diff --git a/src/meltano/core/project_add_service.py b/src/meltano/core/project_add_service.py
--- a/src/meltano/core/project_add_service.py
+++ b/src/meltano/core/project_add_service.py
@@ -80,7 +80,7 @@
if lock and not added.is_custom():
self.plugins_service.lock_service.save(
- added.parent,
+ added,
exists_ok=plugin.inherit_from is not None,
)
| {"golden_diff": "diff --git a/src/meltano/core/plugin_lock_service.py b/src/meltano/core/plugin_lock_service.py\n--- a/src/meltano/core/plugin_lock_service.py\n+++ b/src/meltano/core/plugin_lock_service.py\n@@ -7,7 +7,8 @@\n \n from structlog.stdlib import get_logger\n \n-from meltano.core.plugin.base import BasePlugin, PluginRef, StandalonePlugin, Variant\n+from meltano.core.plugin.base import PluginRef, StandalonePlugin, Variant\n+from meltano.core.plugin.project_plugin import ProjectPlugin\n from meltano.core.project import Project\n \n logger = get_logger(__name__)\n@@ -42,7 +43,7 @@\n \n def save(\n self,\n- plugin: BasePlugin,\n+ plugin: ProjectPlugin,\n *,\n overwrite: bool = False,\n exists_ok: bool = False,\n@@ -58,11 +59,12 @@\n LockfileAlreadyExistsError: If the lockfile already exists and is not\n flagged for overwriting.\n \"\"\"\n- variant = None if plugin.variant == Variant.DEFAULT_NAME else plugin.variant\n-\n- logger.info(f\"Locking a {type(plugin)}\")\n+ base_plugin = plugin.parent\n+ variant = (\n+ None if base_plugin.variant == Variant.DEFAULT_NAME else base_plugin.variant\n+ )\n \n- plugin_def = plugin.definition\n+ plugin_def = base_plugin.definition\n path = self.project.plugin_lock_path(\n plugin_def.type,\n plugin_def.name,\n@@ -76,13 +78,13 @@\n plugin,\n )\n \n- variant = plugin_def.find_variant(plugin.variant)\n+ variant = plugin_def.find_variant(base_plugin.variant)\n locked_def = StandalonePlugin.from_variant(\n variant,\n- plugin.name,\n- plugin.namespace,\n- plugin.type,\n- label=plugin.label,\n+ base_plugin.name,\n+ base_plugin.namespace,\n+ base_plugin.type,\n+ label=base_plugin.label,\n )\n \n with path.open(\"w\") as lockfile:\ndiff --git a/src/meltano/core/project_add_service.py b/src/meltano/core/project_add_service.py\n--- a/src/meltano/core/project_add_service.py\n+++ b/src/meltano/core/project_add_service.py\n@@ -80,7 +80,7 @@\n \n if lock and not added.is_custom():\n self.plugins_service.lock_service.save(\n- added.parent,\n+ added,\n exists_ok=plugin.inherit_from is not None,\n )\n", "issue": "Fix `KeyError: venv_name` and similar errors in lockfiles\n\n", "before_files": [{"content": "\"\"\"Plugin Lockfile Service.\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nfrom pathlib import Path\n\nfrom structlog.stdlib import get_logger\n\nfrom meltano.core.plugin.base import BasePlugin, PluginRef, StandalonePlugin, Variant\nfrom meltano.core.project import Project\n\nlogger = get_logger(__name__)\n\n\nclass LockfileAlreadyExistsError(Exception):\n \"\"\"Raised when a plugin lockfile already exists.\"\"\"\n\n def __init__(self, message: str, path: Path, plugin: PluginRef):\n \"\"\"Create a new LockfileAlreadyExistsError.\n\n Args:\n message: The error message.\n path: The path to the existing lockfile.\n plugin: The plugin that was locked.\n \"\"\"\n self.path = path\n self.plugin = plugin\n super().__init__(message)\n\n\nclass PluginLockService:\n \"\"\"Plugin Lockfile Service.\"\"\"\n\n def __init__(self, project: Project):\n \"\"\"Create a new Plugin Lockfile Service.\n\n Args:\n project: The Meltano project.\n \"\"\"\n self.project = project\n\n def save(\n self,\n plugin: BasePlugin,\n *,\n overwrite: bool = False,\n exists_ok: bool = False,\n ):\n \"\"\"Save the plugin lockfile.\n\n Args:\n plugin: The plugin definition to save.\n overwrite: Whether to overwrite the lockfile if it already exists.\n exists_ok: Whether raise an exception if the lockfile already exists.\n\n Raises:\n LockfileAlreadyExistsError: If the lockfile already 
exists and is not\n flagged for overwriting.\n \"\"\"\n variant = None if plugin.variant == Variant.DEFAULT_NAME else plugin.variant\n\n logger.info(f\"Locking a {type(plugin)}\")\n\n plugin_def = plugin.definition\n path = self.project.plugin_lock_path(\n plugin_def.type,\n plugin_def.name,\n variant_name=variant,\n )\n\n if path.exists() and not overwrite and not exists_ok:\n raise LockfileAlreadyExistsError(\n f\"Lockfile already exists: {path}\",\n path,\n plugin,\n )\n\n variant = plugin_def.find_variant(plugin.variant)\n locked_def = StandalonePlugin.from_variant(\n variant,\n plugin.name,\n plugin.namespace,\n plugin.type,\n label=plugin.label,\n )\n\n with path.open(\"w\") as lockfile:\n json.dump(locked_def.canonical(), lockfile, indent=2)\n\n logger.debug(\"Locked plugin definition\", path=path)\n", "path": "src/meltano/core/plugin_lock_service.py"}, {"content": "\"\"\"Add plugins to the project.\"\"\"\n\nfrom __future__ import annotations\n\nimport enum\n\nfrom .plugin import BasePlugin, PluginType, Variant\nfrom .plugin.project_plugin import ProjectPlugin\nfrom .project import Project\nfrom .project_plugins_service import PluginAlreadyAddedException, ProjectPluginsService\n\n\nclass PluginAddedReason(str, enum.Enum):\n \"\"\"The reason why a plugin was added to the project.\"\"\"\n\n #: The plugin was added by the user.\n USER_REQUEST = \"user_request\"\n\n #: The plugin was added because it is related to another plugin.\n RELATED = \"related\"\n\n #: The plugin was added because it is required by another plugin.\n REQUIRED = \"required\"\n\n\nclass MissingPluginException(Exception):\n \"\"\"Raised when a plugin is not found.\"\"\"\n\n\nclass ProjectAddService:\n \"\"\"Project Add Service.\"\"\"\n\n def __init__(\n self,\n project: Project,\n plugins_service: ProjectPluginsService = None,\n ):\n \"\"\"Create a new Project Add Service.\n\n Args:\n project: The project to add plugins to.\n plugins_service: The project plugins service.\n \"\"\"\n self.project = project\n self.plugins_service = plugins_service or ProjectPluginsService(project)\n\n def add(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n lock: bool = True,\n **attrs,\n ) -> ProjectPlugin:\n \"\"\"Add a new plugin to the project.\n\n Args:\n plugin_type: The type of the plugin to add.\n plugin_name (str): The name of the plugin to add.\n lock: Whether to generate a lockfile for the plugin.\n attrs: Additional attributes to add to the plugin.\n\n Returns:\n The added plugin.\n \"\"\"\n plugin = ProjectPlugin(\n plugin_type, plugin_name, **attrs, default_variant=Variant.DEFAULT_NAME\n )\n\n with self.plugins_service.disallow_discovery_yaml():\n self.plugins_service.ensure_parent(plugin)\n\n # If we are inheriting from a base plugin definition,\n # repeat the variant and pip_url in meltano.yml\n parent = plugin.parent\n if isinstance(parent, BasePlugin):\n plugin.variant = parent.variant\n plugin.pip_url = parent.pip_url\n\n added = self.add_plugin(plugin)\n\n if lock and not added.is_custom():\n self.plugins_service.lock_service.save(\n added.parent,\n exists_ok=plugin.inherit_from is not None,\n )\n\n return added\n\n def add_plugin(self, plugin: ProjectPlugin):\n \"\"\"Add a plugin to the project.\n\n Args:\n plugin: The plugin to add.\n\n Returns:\n The added plugin.\n \"\"\"\n return self.plugins_service.add_to_file(plugin)\n\n def add_required(\n self,\n plugin: ProjectPlugin,\n lock: bool = True,\n ):\n \"\"\"Add all required plugins to the project.\n\n Args:\n plugin: The plugin to get requirements 
from.\n lock: Whether to generate a lockfile for the plugin.\n\n Returns:\n The added plugins.\n \"\"\"\n added_plugins = []\n for plugin_type, plugins in plugin.all_requires.items():\n for plugin_req in plugins:\n try:\n plugin = self.add(\n plugin_type,\n plugin_req.name,\n variant=plugin_req.variant,\n lock=lock,\n )\n except PluginAlreadyAddedException:\n continue\n\n added_plugins.append(plugin)\n\n added_plugins_with_required = []\n for added in added_plugins:\n added_plugins_with_required.extend([added, *self.add_required(added)])\n\n return added_plugins_with_required\n", "path": "src/meltano/core/project_add_service.py"}], "after_files": [{"content": "\"\"\"Plugin Lockfile Service.\"\"\"\n\nfrom __future__ import annotations\n\nimport json\nfrom pathlib import Path\n\nfrom structlog.stdlib import get_logger\n\nfrom meltano.core.plugin.base import PluginRef, StandalonePlugin, Variant\nfrom meltano.core.plugin.project_plugin import ProjectPlugin\nfrom meltano.core.project import Project\n\nlogger = get_logger(__name__)\n\n\nclass LockfileAlreadyExistsError(Exception):\n \"\"\"Raised when a plugin lockfile already exists.\"\"\"\n\n def __init__(self, message: str, path: Path, plugin: PluginRef):\n \"\"\"Create a new LockfileAlreadyExistsError.\n\n Args:\n message: The error message.\n path: The path to the existing lockfile.\n plugin: The plugin that was locked.\n \"\"\"\n self.path = path\n self.plugin = plugin\n super().__init__(message)\n\n\nclass PluginLockService:\n \"\"\"Plugin Lockfile Service.\"\"\"\n\n def __init__(self, project: Project):\n \"\"\"Create a new Plugin Lockfile Service.\n\n Args:\n project: The Meltano project.\n \"\"\"\n self.project = project\n\n def save(\n self,\n plugin: ProjectPlugin,\n *,\n overwrite: bool = False,\n exists_ok: bool = False,\n ):\n \"\"\"Save the plugin lockfile.\n\n Args:\n plugin: The plugin definition to save.\n overwrite: Whether to overwrite the lockfile if it already exists.\n exists_ok: Whether raise an exception if the lockfile already exists.\n\n Raises:\n LockfileAlreadyExistsError: If the lockfile already exists and is not\n flagged for overwriting.\n \"\"\"\n base_plugin = plugin.parent\n variant = (\n None if base_plugin.variant == Variant.DEFAULT_NAME else base_plugin.variant\n )\n\n plugin_def = base_plugin.definition\n path = self.project.plugin_lock_path(\n plugin_def.type,\n plugin_def.name,\n variant_name=variant,\n )\n\n if path.exists() and not overwrite and not exists_ok:\n raise LockfileAlreadyExistsError(\n f\"Lockfile already exists: {path}\",\n path,\n plugin,\n )\n\n variant = plugin_def.find_variant(base_plugin.variant)\n locked_def = StandalonePlugin.from_variant(\n variant,\n base_plugin.name,\n base_plugin.namespace,\n base_plugin.type,\n label=base_plugin.label,\n )\n\n with path.open(\"w\") as lockfile:\n json.dump(locked_def.canonical(), lockfile, indent=2)\n\n logger.debug(\"Locked plugin definition\", path=path)\n", "path": "src/meltano/core/plugin_lock_service.py"}, {"content": "\"\"\"Add plugins to the project.\"\"\"\n\nfrom __future__ import annotations\n\nimport enum\n\nfrom .plugin import BasePlugin, PluginType, Variant\nfrom .plugin.project_plugin import ProjectPlugin\nfrom .project import Project\nfrom .project_plugins_service import PluginAlreadyAddedException, ProjectPluginsService\n\n\nclass PluginAddedReason(str, enum.Enum):\n \"\"\"The reason why a plugin was added to the project.\"\"\"\n\n #: The plugin was added by the user.\n USER_REQUEST = \"user_request\"\n\n #: The plugin was added 
because it is related to another plugin.\n RELATED = \"related\"\n\n #: The plugin was added because it is required by another plugin.\n REQUIRED = \"required\"\n\n\nclass MissingPluginException(Exception):\n \"\"\"Raised when a plugin is not found.\"\"\"\n\n\nclass ProjectAddService:\n \"\"\"Project Add Service.\"\"\"\n\n def __init__(\n self,\n project: Project,\n plugins_service: ProjectPluginsService = None,\n ):\n \"\"\"Create a new Project Add Service.\n\n Args:\n project: The project to add plugins to.\n plugins_service: The project plugins service.\n \"\"\"\n self.project = project\n self.plugins_service = plugins_service or ProjectPluginsService(project)\n\n def add(\n self,\n plugin_type: PluginType,\n plugin_name: str,\n lock: bool = True,\n **attrs,\n ) -> ProjectPlugin:\n \"\"\"Add a new plugin to the project.\n\n Args:\n plugin_type: The type of the plugin to add.\n plugin_name (str): The name of the plugin to add.\n lock: Whether to generate a lockfile for the plugin.\n attrs: Additional attributes to add to the plugin.\n\n Returns:\n The added plugin.\n \"\"\"\n plugin = ProjectPlugin(\n plugin_type, plugin_name, **attrs, default_variant=Variant.DEFAULT_NAME\n )\n\n with self.plugins_service.disallow_discovery_yaml():\n self.plugins_service.ensure_parent(plugin)\n\n # If we are inheriting from a base plugin definition,\n # repeat the variant and pip_url in meltano.yml\n parent = plugin.parent\n if isinstance(parent, BasePlugin):\n plugin.variant = parent.variant\n plugin.pip_url = parent.pip_url\n\n added = self.add_plugin(plugin)\n\n if lock and not added.is_custom():\n self.plugins_service.lock_service.save(\n added,\n exists_ok=plugin.inherit_from is not None,\n )\n\n return added\n\n def add_plugin(self, plugin: ProjectPlugin):\n \"\"\"Add a plugin to the project.\n\n Args:\n plugin: The plugin to add.\n\n Returns:\n The added plugin.\n \"\"\"\n return self.plugins_service.add_to_file(plugin)\n\n def add_required(\n self,\n plugin: ProjectPlugin,\n lock: bool = True,\n ):\n \"\"\"Add all required plugins to the project.\n\n Args:\n plugin: The plugin to get requirements from.\n lock: Whether to generate a lockfile for the plugin.\n\n Returns:\n The added plugins.\n \"\"\"\n added_plugins = []\n for plugin_type, plugins in plugin.all_requires.items():\n for plugin_req in plugins:\n try:\n plugin = self.add(\n plugin_type,\n plugin_req.name,\n variant=plugin_req.variant,\n lock=lock,\n )\n except PluginAlreadyAddedException:\n continue\n\n added_plugins.append(plugin)\n\n added_plugins_with_required = []\n for added in added_plugins:\n added_plugins_with_required.extend([added, *self.add_required(added)])\n\n return added_plugins_with_required\n", "path": "src/meltano/core/project_add_service.py"}]} | 2,062 | 540 |
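A rough, self-contained sketch of the relationship the patch above relies on: the lockfile payload is now built from the ProjectPlugin's parent (base) plugin rather than from the object passed to `save()` directly. Everything below is an illustrative stand-in, not Meltano's real models:

```python
from dataclasses import dataclass


@dataclass
class BasePluginStub:
    # Stand-in for the discovered base plugin that carries the full definition.
    name: str
    namespace: str
    variant: str
    label: str


@dataclass
class ProjectPluginStub:
    # Stand-in for ProjectPlugin; `parent` is what ensure_parent() attaches.
    name: str
    parent: BasePluginStub


def lock_payload(plugin: ProjectPluginStub) -> dict:
    # Mirrors the patched PluginLockService.save(): definition fields are
    # read from plugin.parent, so the serialized lockfile is complete.
    base = plugin.parent
    return {
        "name": base.name,
        "namespace": base.namespace,
        "variant": base.variant,
        "label": base.label,
    }


example = ProjectPluginStub(
    name="tap-example",
    parent=BasePluginStub("tap-example", "tap_example", "main", "Example tap"),
)
print(lock_payload(example))
```

This also reflects the second hunk of the diff: `ProjectAddService.add` now passes the added ProjectPlugin itself, and the lock service dereferences its parent internally.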
gh_patches_debug_6562 | rasdani/github-patches | git_diff | activeloopai__deeplake-1350 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] dataset_meta.json isn't updated after create_tensor
## 🐛🐛 Bug Report
After loading an existing dataset with hub.dataset, create_tensor doesn't update the "tensors" attribute in dataset_meta.json.
### ⚗️ Current Behavior
For example, a dataset called "hub_dataset" with tensor "images" already exists. Loading it with hub.dataset and then creating a new tensor, "test":
```python
ds = hub.dataset("hub_dataset")
ds.create_tensor('test')
```
The "tensors" attribute in dataset_meta.json won't contain the "test" tensor.
```json
{"version": "2.0.8", "tensors": ["images"]}
```
### ⚙️ Environment
- `Python` version(s): [3.7.11]
- `OS`: Ubuntu 18.04
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hub/core/storage/local.py`
Content:
```
1 import os
2 import shutil
3 from typing import Optional, Set
4
5 from hub.core.storage.provider import StorageProvider
6 from hub.util.exceptions import DirectoryAtPathException, FileAtPathException
7
8
9 class LocalProvider(StorageProvider):
10 """Provider class for using the local filesystem."""
11
12 def __init__(self, root: str):
13 """Initializes the LocalProvider.
14
15 Example:
16 local_provider = LocalProvider("/home/ubuntu/Documents/")
17
18 Args:
19 root (str): The root of the provider. All read/write request keys will be appended to root."
20
21 Raises:
22 FileAtPathException: If the root is a file instead of a directory.
23 """
24 if os.path.isfile(root):
25 raise FileAtPathException(root)
26 self.root = root
27 self.files: Optional[Set[str]] = None
28
29 def subdir(self, path: str):
30 return self.__class__(os.path.join(self.root, path))
31
32 def __getitem__(self, path: str):
33 """Gets the object present at the path within the given byte range.
34
35 Example:
36 local_provider = LocalProvider("/home/ubuntu/Documents/")
37 my_data = local_provider["abc.txt"]
38
39 Args:
40 path (str): The path relative to the root of the provider.
41
42 Returns:
43 bytes: The bytes of the object present at the path.
44
45 Raises:
46 KeyError: If an object is not found at the path.
47 DirectoryAtPathException: If a directory is found at the path.
48 Exception: Any other exception encountered while trying to fetch the object.
49 """
50 try:
51 full_path = self._check_is_file(path)
52 with open(full_path, "rb") as file:
53 return file.read()
54 except DirectoryAtPathException:
55 raise
56 except FileNotFoundError:
57 raise KeyError(path)
58
59 def __setitem__(self, path: str, value: bytes):
60 """Sets the object present at the path with the value
61
62 Example:
63 local_provider = LocalProvider("/home/ubuntu/Documents/")
64 local_provider["abc.txt"] = b"abcd"
65
66 Args:
67 path (str): the path relative to the root of the provider.
68 value (bytes): the value to be assigned at the path.
69
70 Raises:
71 Exception: If unable to set item due to directory at path or permission or space issues.
72 FileAtPathException: If the directory to the path is a file instead of a directory.
73 ReadOnlyError: If the provider is in read-only mode.
74 """
75 self.check_readonly()
76 full_path = self._check_is_file(path)
77 directory = os.path.dirname(full_path)
78 if os.path.isfile(directory):
79 raise FileAtPathException(directory)
80 if not os.path.exists(directory):
81 os.makedirs(directory, exist_ok=True)
82 with open(full_path, "wb") as file:
83 file.write(value)
84 if self.files is not None:
85 self.files.add(path)
86
87 def __delitem__(self, path: str):
88 """Delete the object present at the path.
89
90 Example:
91 local_provider = LocalProvider("/home/ubuntu/Documents/")
92 del local_provider["abc.txt"]
93
94 Args:
95 path (str): the path to the object relative to the root of the provider.
96
97 Raises:
98 KeyError: If an object is not found at the path.
99 DirectoryAtPathException: If a directory is found at the path.
100 Exception: Any other exception encountered while trying to fetch the object.
101 ReadOnlyError: If the provider is in read-only mode.
102 """
103 self.check_readonly()
104 try:
105 full_path = self._check_is_file(path)
106 os.remove(full_path)
107 if self.files is not None:
108 self.files.discard(path)
109 except DirectoryAtPathException:
110 raise
111 except FileNotFoundError:
112 raise KeyError
113
114 def __iter__(self):
115 """Generator function that iterates over the keys of the provider.
116
117 Example:
118 local_provider = LocalProvider("/home/ubuntu/Documents/")
119 for my_data in local_provider:
120 pass
121
122 Yields:
123 str: the path of the object that it is iterating over, relative to the root of the provider.
124 """
125 yield from self._all_keys()
126
127 def __len__(self):
128 """Returns the number of files present inside the root of the provider.
129
130 Example:
131 local_provider = LocalProvider("/home/ubuntu/Documents/")
132 len(local_provider)
133
134 Returns:
135 int: the number of files present inside the root.
136 """
137 return len(self._all_keys())
138
139 def _all_keys(self, refresh: bool = False) -> Set[str]:
140 """Lists all the objects present at the root of the Provider.
141
142 Args:
143 refresh (bool): refresh keys
144
145 Returns:
146 set: set of all the objects found at the root of the Provider.
147 """
148 if self.files is None or refresh:
149 full_path = os.path.expanduser(self.root)
150 key_set = set()
151 for root, dirs, files in os.walk(full_path):
152 for file in files:
153 key_set.add(
154 os.path.relpath(os.path.join(full_path, file), full_path)
155 )
156 self.files = key_set
157 return self.files
158
159 def _check_is_file(self, path: str):
160 """Checks if the path is a file. Returns the full_path to file if True.
161
162 Args:
163 path (str): the path to the object relative to the root of the provider.
164
165 Returns:
166 str: the full path to the requested file.
167
168 Raises:
169 DirectoryAtPathException: If a directory is found at the path.
170 """
171 full_path = os.path.join(self.root, path)
172 full_path = os.path.expanduser(full_path)
173 if os.path.isdir(full_path):
174 raise DirectoryAtPathException
175 return full_path
176
177 def clear(self):
178 """Deletes ALL data on the local machine (under self.root). Exercise caution!"""
179 self.check_readonly()
180 self.files = set()
181 full_path = os.path.expanduser(self.root)
182 if os.path.exists(full_path):
183 shutil.rmtree(full_path)
184
185 def __contains__(self, key) -> bool:
186 full_path = self._check_is_file(key)
187 return os.path.exists(full_path)
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hub/core/storage/local.py b/hub/core/storage/local.py
--- a/hub/core/storage/local.py
+++ b/hub/core/storage/local.py
@@ -150,9 +150,7 @@
key_set = set()
for root, dirs, files in os.walk(full_path):
for file in files:
- key_set.add(
- os.path.relpath(os.path.join(full_path, file), full_path)
- )
+ key_set.add(os.path.relpath(os.path.join(root, file), full_path))
self.files = key_set
return self.files
| {"golden_diff": "diff --git a/hub/core/storage/local.py b/hub/core/storage/local.py\n--- a/hub/core/storage/local.py\n+++ b/hub/core/storage/local.py\n@@ -150,9 +150,7 @@\n key_set = set()\n for root, dirs, files in os.walk(full_path):\n for file in files:\n- key_set.add(\n- os.path.relpath(os.path.join(full_path, file), full_path)\n- )\n+ key_set.add(os.path.relpath(os.path.join(root, file), full_path))\n self.files = key_set\n return self.files\n", "issue": "[BUG] dataset_meta.json isn't updated after create_tensor\n## \ud83d\udc1b\ud83d\udc1b Bug Report\r\n\r\nAfter loading an existing dataset with hub.dataset, create_tensor doesn't update the \"tensors\" attribute in dataset_meta.json.\r\n\r\n### \u2697\ufe0f Current Behavior\r\n\r\nFor example, a dataset called \"hub_dataset\" with tensor \"images\" already exists. Loading it with hub.dataset then appending a new tensor, \"test\":\r\n\r\n```python\r\nds = hub.dataset(\"hub_dataset\")\r\nds.create_tensor('test')\r\n```\r\nThe \"tensors\" attribute in dataset_meta.json won't contain the \"test\" tensor.\r\n\r\n```json\r\n{\"version\": \"2.0.8\", \"tensors\": [\"images\"]}\r\n```\r\n\r\n### \u2699\ufe0f Environment\r\n- `Python` version(s): [3.7.11]\r\n- `OS`: Ubuntu 18.04\n", "before_files": [{"content": "import os\nimport shutil\nfrom typing import Optional, Set\n\nfrom hub.core.storage.provider import StorageProvider\nfrom hub.util.exceptions import DirectoryAtPathException, FileAtPathException\n\n\nclass LocalProvider(StorageProvider):\n \"\"\"Provider class for using the local filesystem.\"\"\"\n\n def __init__(self, root: str):\n \"\"\"Initializes the LocalProvider.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n\n Args:\n root (str): The root of the provider. 
All read/write request keys will be appended to root.\"\n\n Raises:\n FileAtPathException: If the root is a file instead of a directory.\n \"\"\"\n if os.path.isfile(root):\n raise FileAtPathException(root)\n self.root = root\n self.files: Optional[Set[str]] = None\n\n def subdir(self, path: str):\n return self.__class__(os.path.join(self.root, path))\n\n def __getitem__(self, path: str):\n \"\"\"Gets the object present at the path within the given byte range.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n my_data = local_provider[\"abc.txt\"]\n\n Args:\n path (str): The path relative to the root of the provider.\n\n Returns:\n bytes: The bytes of the object present at the path.\n\n Raises:\n KeyError: If an object is not found at the path.\n DirectoryAtPathException: If a directory is found at the path.\n Exception: Any other exception encountered while trying to fetch the object.\n \"\"\"\n try:\n full_path = self._check_is_file(path)\n with open(full_path, \"rb\") as file:\n return file.read()\n except DirectoryAtPathException:\n raise\n except FileNotFoundError:\n raise KeyError(path)\n\n def __setitem__(self, path: str, value: bytes):\n \"\"\"Sets the object present at the path with the value\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n local_provider[\"abc.txt\"] = b\"abcd\"\n\n Args:\n path (str): the path relative to the root of the provider.\n value (bytes): the value to be assigned at the path.\n\n Raises:\n Exception: If unable to set item due to directory at path or permission or space issues.\n FileAtPathException: If the directory to the path is a file instead of a directory.\n ReadOnlyError: If the provider is in read-only mode.\n \"\"\"\n self.check_readonly()\n full_path = self._check_is_file(path)\n directory = os.path.dirname(full_path)\n if os.path.isfile(directory):\n raise FileAtPathException(directory)\n if not os.path.exists(directory):\n os.makedirs(directory, exist_ok=True)\n with open(full_path, \"wb\") as file:\n file.write(value)\n if self.files is not None:\n self.files.add(path)\n\n def __delitem__(self, path: str):\n \"\"\"Delete the object present at the path.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n del local_provider[\"abc.txt\"]\n\n Args:\n path (str): the path to the object relative to the root of the provider.\n\n Raises:\n KeyError: If an object is not found at the path.\n DirectoryAtPathException: If a directory is found at the path.\n Exception: Any other exception encountered while trying to fetch the object.\n ReadOnlyError: If the provider is in read-only mode.\n \"\"\"\n self.check_readonly()\n try:\n full_path = self._check_is_file(path)\n os.remove(full_path)\n if self.files is not None:\n self.files.discard(path)\n except DirectoryAtPathException:\n raise\n except FileNotFoundError:\n raise KeyError\n\n def __iter__(self):\n \"\"\"Generator function that iterates over the keys of the provider.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n for my_data in local_provider:\n pass\n\n Yields:\n str: the path of the object that it is iterating over, relative to the root of the provider.\n \"\"\"\n yield from self._all_keys()\n\n def __len__(self):\n \"\"\"Returns the number of files present inside the root of the provider.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n len(local_provider)\n\n Returns:\n int: the number of files present inside the root.\n \"\"\"\n return 
len(self._all_keys())\n\n def _all_keys(self, refresh: bool = False) -> Set[str]:\n \"\"\"Lists all the objects present at the root of the Provider.\n\n Args:\n refresh (bool): refresh keys\n\n Returns:\n set: set of all the objects found at the root of the Provider.\n \"\"\"\n if self.files is None or refresh:\n full_path = os.path.expanduser(self.root)\n key_set = set()\n for root, dirs, files in os.walk(full_path):\n for file in files:\n key_set.add(\n os.path.relpath(os.path.join(full_path, file), full_path)\n )\n self.files = key_set\n return self.files\n\n def _check_is_file(self, path: str):\n \"\"\"Checks if the path is a file. Returns the full_path to file if True.\n\n Args:\n path (str): the path to the object relative to the root of the provider.\n\n Returns:\n str: the full path to the requested file.\n\n Raises:\n DirectoryAtPathException: If a directory is found at the path.\n \"\"\"\n full_path = os.path.join(self.root, path)\n full_path = os.path.expanduser(full_path)\n if os.path.isdir(full_path):\n raise DirectoryAtPathException\n return full_path\n\n def clear(self):\n \"\"\"Deletes ALL data on the local machine (under self.root). Exercise caution!\"\"\"\n self.check_readonly()\n self.files = set()\n full_path = os.path.expanduser(self.root)\n if os.path.exists(full_path):\n shutil.rmtree(full_path)\n\n def __contains__(self, key) -> bool:\n full_path = self._check_is_file(key)\n return os.path.exists(full_path)\n", "path": "hub/core/storage/local.py"}], "after_files": [{"content": "import os\nimport shutil\nfrom typing import Optional, Set\n\nfrom hub.core.storage.provider import StorageProvider\nfrom hub.util.exceptions import DirectoryAtPathException, FileAtPathException\n\n\nclass LocalProvider(StorageProvider):\n \"\"\"Provider class for using the local filesystem.\"\"\"\n\n def __init__(self, root: str):\n \"\"\"Initializes the LocalProvider.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n\n Args:\n root (str): The root of the provider. 
All read/write request keys will be appended to root.\"\n\n Raises:\n FileAtPathException: If the root is a file instead of a directory.\n \"\"\"\n if os.path.isfile(root):\n raise FileAtPathException(root)\n self.root = root\n self.files: Optional[Set[str]] = None\n\n def subdir(self, path: str):\n return self.__class__(os.path.join(self.root, path))\n\n def __getitem__(self, path: str):\n \"\"\"Gets the object present at the path within the given byte range.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n my_data = local_provider[\"abc.txt\"]\n\n Args:\n path (str): The path relative to the root of the provider.\n\n Returns:\n bytes: The bytes of the object present at the path.\n\n Raises:\n KeyError: If an object is not found at the path.\n DirectoryAtPathException: If a directory is found at the path.\n Exception: Any other exception encountered while trying to fetch the object.\n \"\"\"\n try:\n full_path = self._check_is_file(path)\n with open(full_path, \"rb\") as file:\n return file.read()\n except DirectoryAtPathException:\n raise\n except FileNotFoundError:\n raise KeyError(path)\n\n def __setitem__(self, path: str, value: bytes):\n \"\"\"Sets the object present at the path with the value\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n local_provider[\"abc.txt\"] = b\"abcd\"\n\n Args:\n path (str): the path relative to the root of the provider.\n value (bytes): the value to be assigned at the path.\n\n Raises:\n Exception: If unable to set item due to directory at path or permission or space issues.\n FileAtPathException: If the directory to the path is a file instead of a directory.\n ReadOnlyError: If the provider is in read-only mode.\n \"\"\"\n self.check_readonly()\n full_path = self._check_is_file(path)\n directory = os.path.dirname(full_path)\n if os.path.isfile(directory):\n raise FileAtPathException(directory)\n if not os.path.exists(directory):\n os.makedirs(directory, exist_ok=True)\n with open(full_path, \"wb\") as file:\n file.write(value)\n if self.files is not None:\n self.files.add(path)\n\n def __delitem__(self, path: str):\n \"\"\"Delete the object present at the path.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n del local_provider[\"abc.txt\"]\n\n Args:\n path (str): the path to the object relative to the root of the provider.\n\n Raises:\n KeyError: If an object is not found at the path.\n DirectoryAtPathException: If a directory is found at the path.\n Exception: Any other exception encountered while trying to fetch the object.\n ReadOnlyError: If the provider is in read-only mode.\n \"\"\"\n self.check_readonly()\n try:\n full_path = self._check_is_file(path)\n os.remove(full_path)\n if self.files is not None:\n self.files.discard(path)\n except DirectoryAtPathException:\n raise\n except FileNotFoundError:\n raise KeyError\n\n def __iter__(self):\n \"\"\"Generator function that iterates over the keys of the provider.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n for my_data in local_provider:\n pass\n\n Yields:\n str: the path of the object that it is iterating over, relative to the root of the provider.\n \"\"\"\n yield from self._all_keys()\n\n def __len__(self):\n \"\"\"Returns the number of files present inside the root of the provider.\n\n Example:\n local_provider = LocalProvider(\"/home/ubuntu/Documents/\")\n len(local_provider)\n\n Returns:\n int: the number of files present inside the root.\n \"\"\"\n return 
len(self._all_keys())\n\n def _all_keys(self, refresh: bool = False) -> Set[str]:\n \"\"\"Lists all the objects present at the root of the Provider.\n\n Args:\n refresh (bool): refresh keys\n\n Returns:\n set: set of all the objects found at the root of the Provider.\n \"\"\"\n if self.files is None or refresh:\n full_path = os.path.expanduser(self.root)\n key_set = set()\n for root, dirs, files in os.walk(full_path):\n for file in files:\n key_set.add(os.path.relpath(os.path.join(root, file), full_path))\n self.files = key_set\n return self.files\n\n def _check_is_file(self, path: str):\n \"\"\"Checks if the path is a file. Returns the full_path to file if True.\n\n Args:\n path (str): the path to the object relative to the root of the provider.\n\n Returns:\n str: the full path to the requested file.\n\n Raises:\n DirectoryAtPathException: If a directory is found at the path.\n \"\"\"\n full_path = os.path.join(self.root, path)\n full_path = os.path.expanduser(full_path)\n if os.path.isdir(full_path):\n raise DirectoryAtPathException\n return full_path\n\n def clear(self):\n \"\"\"Deletes ALL data on the local machine (under self.root). Exercise caution!\"\"\"\n self.check_readonly()\n self.files = set()\n full_path = os.path.expanduser(self.root)\n if os.path.exists(full_path):\n shutil.rmtree(full_path)\n\n def __contains__(self, key) -> bool:\n full_path = self._check_is_file(key)\n return os.path.exists(full_path)\n", "path": "hub/core/storage/local.py"}]} | 2,220 | 132 |
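A minimal, self-contained sketch of why the `_all_keys` patch in the record above joins `root` rather than `full_path`. The temporary directory layout and file names here are hypothetical, chosen only to show the behaviour:

```python
# Sketch (hypothetical layout): os.walk yields nested directories via `root`,
# so joining with the fixed top-level `full_path` drops the subdirectory prefix.
import os
import tempfile

base = tempfile.mkdtemp()
os.makedirs(os.path.join(base, "images"), exist_ok=True)
open(os.path.join(base, "dataset_meta.json"), "w").close()
open(os.path.join(base, "images", "chunk_0"), "w").close()

buggy, fixed = set(), set()
for root, dirs, files in os.walk(base):
    for f in files:
        buggy.add(os.path.relpath(os.path.join(base, f), base))  # old join: drops the "images" prefix
        fixed.add(os.path.relpath(os.path.join(root, f), base))  # patched join: keeps nested paths

print(buggy)  # {'dataset_meta.json', 'chunk_0'} -- the nested file gets the wrong key
print(fixed)  # {'dataset_meta.json', 'images/chunk_0'}
```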
gh_patches_debug_23135 | rasdani/github-patches | git_diff | kornia__kornia-1543 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`RandomMotionBlur` fails when `kernel_size` is a `Tuple[int, int]`
### Describe the bug
Using the `RandomMotionBlur` augmentation with a tuple for `kernel_size` raises a `ValueError` (see the traceback below). This appears to happen because `apply_transform` internally tries to convert `kernel_size` to a single `int` while receiving a collection of values.
### Reproduction steps
```python
import kornia as K
import torch
x = torch.rand(3,3,224,224)
tfm = K.augmentation.RandomMotionBlur((7,21), (15.,15.), (-1.,1.), p=1)
tfm(x)
```
### Expected behavior
The transform should run successfully, sampling a different kernel size for each batch.
### Environment
```shell
wget https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py
# For security purposes, please check the contents of collect_env.py before running it.
python collect_env.py
```
- PyTorch Version (e.g., 1.0): 1.10.1
- OS (e.g., Linux): macOS 12.0.1 (x86_64)
- How you installed PyTorch (`conda`, `pip`, source): pip
- Python version: 3.7.10
- CUDA/cuDNN version: NA
- GPU models and configuration: NA
- Any other relevant information: NA
### Additional context
Full stack trace:
```python
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-a535d4587048> in <module>
----> 1 tfm(x)
~/miniconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1101 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1102 return forward_call(*input, **kwargs)
1103 # Do not call functions when jit is used
1104 full_backward_hooks, non_full_backward_hooks = [], []
~/miniconda3/lib/python3.7/site-packages/kornia/augmentation/base.py in forward(self, input, params, return_transform)
268
269 self._params = params
--> 270 output = self.apply_func(in_tensor, in_transform, self._params, return_transform)
271 return _transform_output_shape(output, ori_shape) if self.keepdim else output
272
~/miniconda3/lib/python3.7/site-packages/kornia/augmentation/base.py in apply_func(self, in_tensor, in_transform, params, return_transform)
227 elif torch.sum(to_apply) == len(to_apply):
228 trans_matrix = self.compute_transformation(in_tensor, params)
--> 229 output = self.apply_transform(in_tensor, params, trans_matrix)
230 else:
231 output = in_tensor.clone()
~/miniconda3/lib/python3.7/site-packages/kornia/augmentation/augmentation.py in apply_transform(self, input, params, transform)
1400 self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None
1401 ) -> torch.Tensor:
-> 1402 kernel_size: int = cast(int, params["ksize_factor"].unique().item())
1403 angle = params["angle_factor"]
1404 direction = params["direction_factor"]
ValueError: only one element tensors can be converted to Python scalars
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kornia/augmentation/_2d/intensity/motion_blur.py`
Content:
```
1 from typing import Dict, Optional, Tuple, Union, cast
2
3 import torch
4
5 from kornia.augmentation import random_generator as rg
6 from kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D
7 from kornia.constants import BorderType, Resample
8 from kornia.filters import motion_blur
9
10
11 class RandomMotionBlur(IntensityAugmentationBase2D):
12 r"""Perform motion blur on 2D images (4D tensor).
13
14 .. image:: _static/img/RandomMotionBlur.png
15
16 Args:
17 p: probability of applying the transformation.
18 kernel_size: motion kernel size (odd and positive).
19 If int, the kernel will have a fixed size.
20 If Tuple[int, int], it will randomly generate the value from the range batch-wisely.
21 angle: angle of the motion blur in degrees (anti-clockwise rotation).
22 If float, it will generate the value from (-angle, angle).
23 direction: forward/backward direction of the motion blur.
24 Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),
25 while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a
26 uniformly (but still angled) motion blur.
27 If float, it will generate the value from (-direction, direction).
28 If Tuple[int, int], it will randomly generate the value from the range.
29 border_type: the padding mode to be applied before convolving.
30 CONSTANT = 0, REFLECT = 1, REPLICATE = 2, CIRCULAR = 3.
31 resample: the interpolation mode.
32 keepdim: whether to keep the output shape the same as input (True) or broadcast it
33 to the batch form (False).
34
35 Shape:
36 - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`
37 - Output: :math:`(B, C, H, W)`
38
39 Note:
40 Input tensor must be float and normalized into [0, 1] for the best differentiability support.
41 Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the
42 applied transformation will be merged int to the input transformation tensor and returned.
43
44 Please set ``resample`` to ``'bilinear'`` if more meaningful gradients wanted.
45
46 .. note::
47 This function internally uses :func:`kornia.filters.motion_blur`.
48
49 Examples:
50 >>> rng = torch.manual_seed(0)
51 >>> input = torch.ones(1, 1, 5, 5)
52 >>> motion_blur = RandomMotionBlur(3, 35., 0.5, p=1.)
53 >>> motion_blur(input)
54 tensor([[[[0.5773, 1.0000, 1.0000, 1.0000, 0.7561],
55 [0.5773, 1.0000, 1.0000, 1.0000, 0.7561],
56 [0.5773, 1.0000, 1.0000, 1.0000, 0.7561],
57 [0.5773, 1.0000, 1.0000, 1.0000, 0.7561],
58 [0.5773, 1.0000, 1.0000, 1.0000, 0.7561]]]])
59
60 To apply the exact augmenation again, you may take the advantage of the previous parameter state:
61 >>> input = torch.randn(1, 3, 32, 32)
62 >>> aug = RandomMotionBlur(3, 35., 0.5, p=1.)
63 >>> (aug(input) == aug(input, params=aug._params)).all()
64 tensor(True)
65 """
66
67 def __init__(
68 self,
69 kernel_size: Union[int, Tuple[int, int]],
70 angle: Union[torch.Tensor, float, Tuple[float, float]],
71 direction: Union[torch.Tensor, float, Tuple[float, float]],
72 border_type: Union[int, str, BorderType] = BorderType.CONSTANT.name,
73 resample: Union[str, int, Resample] = Resample.NEAREST.name,
74 return_transform: bool = False,
75 same_on_batch: bool = False,
76 p: float = 0.5,
77 keepdim: bool = False,
78 ) -> None:
79 super().__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch, keepdim=keepdim)
80 self._param_generator = rg.MotionBlurGenerator(kernel_size, angle, direction)
81 self.flags = dict(border_type=BorderType.get(border_type), resample=Resample.get(resample))
82
83 def apply_transform(
84 self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None
85 ) -> torch.Tensor:
86 kernel_size: int = cast(int, params["ksize_factor"].unique().item())
87 angle = params["angle_factor"]
88 direction = params["direction_factor"]
89 return motion_blur(
90 input,
91 kernel_size,
92 angle,
93 direction,
94 border_type=self.flags["border_type"].name.lower(),
95 mode=self.flags["resample"].name.lower(),
96 )
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kornia/augmentation/_2d/intensity/motion_blur.py b/kornia/augmentation/_2d/intensity/motion_blur.py
--- a/kornia/augmentation/_2d/intensity/motion_blur.py
+++ b/kornia/augmentation/_2d/intensity/motion_blur.py
@@ -1,4 +1,4 @@
-from typing import Dict, Optional, Tuple, Union, cast
+from typing import Dict, List, Optional, Tuple, Union, cast
import torch
@@ -83,14 +83,14 @@
def apply_transform(
self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None
) -> torch.Tensor:
- kernel_size: int = cast(int, params["ksize_factor"].unique().item())
- angle = params["angle_factor"]
- direction = params["direction_factor"]
+ # sample a kernel size
+ kernel_size_list: List[int] = params["ksize_factor"].tolist()
+ idx: int = cast(int, torch.randint(len(kernel_size_list), (1,)).item())
return motion_blur(
input,
- kernel_size,
- angle,
- direction,
+ kernel_size=kernel_size_list[idx],
+ angle=params["angle_factor"],
+ direction=params["direction_factor"],
border_type=self.flags["border_type"].name.lower(),
mode=self.flags["resample"].name.lower(),
)
| {"golden_diff": "diff --git a/kornia/augmentation/_2d/intensity/motion_blur.py b/kornia/augmentation/_2d/intensity/motion_blur.py\n--- a/kornia/augmentation/_2d/intensity/motion_blur.py\n+++ b/kornia/augmentation/_2d/intensity/motion_blur.py\n@@ -1,4 +1,4 @@\n-from typing import Dict, Optional, Tuple, Union, cast\n+from typing import Dict, List, Optional, Tuple, Union, cast\n \n import torch\n \n@@ -83,14 +83,14 @@\n def apply_transform(\n self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n- kernel_size: int = cast(int, params[\"ksize_factor\"].unique().item())\n- angle = params[\"angle_factor\"]\n- direction = params[\"direction_factor\"]\n+ # sample a kernel size\n+ kernel_size_list: List[int] = params[\"ksize_factor\"].tolist()\n+ idx: int = cast(int, torch.randint(len(kernel_size_list), (1,)).item())\n return motion_blur(\n input,\n- kernel_size,\n- angle,\n- direction,\n+ kernel_size=kernel_size_list[idx],\n+ angle=params[\"angle_factor\"],\n+ direction=params[\"direction_factor\"],\n border_type=self.flags[\"border_type\"].name.lower(),\n mode=self.flags[\"resample\"].name.lower(),\n )\n", "issue": "`RandomMotionBlur` fails when `kernel_size` is a `Tuple[int, int]`\n### Describe the bug\r\n\r\nUsing the `RandomMotionBlur` augmentation with a tuple for `kernel_size` throws a `TypeError`. This appears to happen because internally `apply_transform` is trying to convert `kernel_size` to an `int` while receiving a collection of numbers\r\n\r\n### Reproduction steps\r\n\r\n```python\r\nimport kornia as K\r\nimport torch\r\n\r\nx = torch.rand(3,3,224,224)\r\ntfm = K.augmentation.RandomMotionBlur((7,21), (15.,15.), (-1.,1.), p=1)\r\n\r\ntfm(x)\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nSuccessfully perform the transform with a different kernel size every batch\r\n\r\n### Environment\r\n\r\n```shell\r\nwget https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py\r\n# For security purposes, please check the contents of collect_env.py before running it.\r\npython collect_env.py\r\n```\r\n- PyTorch Version (e.g., 1.0): 1.10.1\r\n- OS (e.g., Linux): macOS 12.0.1 (x86_64)\r\n- How you installed PyTorch (`conda`, `pip`, source): pip\r\n- Python version: 3.7.10\r\n- CUDA/cuDNN version: NA\r\n- GPU models and configuration: NA\r\n- Any other relevant information: NA\r\n\r\n\r\n\r\n\r\n### Additional context\r\n\r\nFull stack trace:\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-4-a535d4587048> in <module>\r\n----> 1 tfm(x)\r\n\r\n~/miniconda3/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)\r\n 1100 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\r\n 1101 or _global_forward_hooks or _global_forward_pre_hooks):\r\n-> 1102 return forward_call(*input, **kwargs)\r\n 1103 # Do not call functions when jit is used\r\n 1104 full_backward_hooks, non_full_backward_hooks = [], []\r\n\r\n~/miniconda3/lib/python3.7/site-packages/kornia/augmentation/base.py in forward(self, input, params, return_transform)\r\n 268 \r\n 269 self._params = params\r\n--> 270 output = self.apply_func(in_tensor, in_transform, self._params, return_transform)\r\n 271 return _transform_output_shape(output, ori_shape) if self.keepdim else output\r\n 272 
\r\n\r\n~/miniconda3/lib/python3.7/site-packages/kornia/augmentation/base.py in apply_func(self, in_tensor, in_transform, params, return_transform)\r\n 227 elif torch.sum(to_apply) == len(to_apply):\r\n 228 trans_matrix = self.compute_transformation(in_tensor, params)\r\n--> 229 output = self.apply_transform(in_tensor, params, trans_matrix)\r\n 230 else:\r\n 231 output = in_tensor.clone()\r\n\r\n~/miniconda3/lib/python3.7/site-packages/kornia/augmentation/augmentation.py in apply_transform(self, input, params, transform)\r\n 1400 self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None\r\n 1401 ) -> torch.Tensor:\r\n-> 1402 kernel_size: int = cast(int, params[\"ksize_factor\"].unique().item())\r\n 1403 angle = params[\"angle_factor\"]\r\n 1404 direction = params[\"direction_factor\"]\r\n\r\nValueError: only one element tensors can be converted to Python scalars\r\n```\n", "before_files": [{"content": "from typing import Dict, Optional, Tuple, Union, cast\n\nimport torch\n\nfrom kornia.augmentation import random_generator as rg\nfrom kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D\nfrom kornia.constants import BorderType, Resample\nfrom kornia.filters import motion_blur\n\n\nclass RandomMotionBlur(IntensityAugmentationBase2D):\n r\"\"\"Perform motion blur on 2D images (4D tensor).\n\n .. image:: _static/img/RandomMotionBlur.png\n\n Args:\n p: probability of applying the transformation.\n kernel_size: motion kernel size (odd and positive).\n If int, the kernel will have a fixed size.\n If Tuple[int, int], it will randomly generate the value from the range batch-wisely.\n angle: angle of the motion blur in degrees (anti-clockwise rotation).\n If float, it will generate the value from (-angle, angle).\n direction: forward/backward direction of the motion blur.\n Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),\n while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a\n uniformly (but still angled) motion blur.\n If float, it will generate the value from (-direction, direction).\n If Tuple[int, int], it will randomly generate the value from the range.\n border_type: the padding mode to be applied before convolving.\n CONSTANT = 0, REFLECT = 1, REPLICATE = 2, CIRCULAR = 3.\n resample: the interpolation mode.\n keepdim: whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False).\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Please set ``resample`` to ``'bilinear'`` if more meaningful gradients wanted.\n\n .. 
note::\n This function internally uses :func:`kornia.filters.motion_blur`.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.ones(1, 1, 5, 5)\n >>> motion_blur = RandomMotionBlur(3, 35., 0.5, p=1.)\n >>> motion_blur(input)\n tensor([[[[0.5773, 1.0000, 1.0000, 1.0000, 0.7561],\n [0.5773, 1.0000, 1.0000, 1.0000, 0.7561],\n [0.5773, 1.0000, 1.0000, 1.0000, 0.7561],\n [0.5773, 1.0000, 1.0000, 1.0000, 0.7561],\n [0.5773, 1.0000, 1.0000, 1.0000, 0.7561]]]])\n\n To apply the exact augmenation again, you may take the advantage of the previous parameter state:\n >>> input = torch.randn(1, 3, 32, 32)\n >>> aug = RandomMotionBlur(3, 35., 0.5, p=1.)\n >>> (aug(input) == aug(input, params=aug._params)).all()\n tensor(True)\n \"\"\"\n\n def __init__(\n self,\n kernel_size: Union[int, Tuple[int, int]],\n angle: Union[torch.Tensor, float, Tuple[float, float]],\n direction: Union[torch.Tensor, float, Tuple[float, float]],\n border_type: Union[int, str, BorderType] = BorderType.CONSTANT.name,\n resample: Union[str, int, Resample] = Resample.NEAREST.name,\n return_transform: bool = False,\n same_on_batch: bool = False,\n p: float = 0.5,\n keepdim: bool = False,\n ) -> None:\n super().__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch, keepdim=keepdim)\n self._param_generator = rg.MotionBlurGenerator(kernel_size, angle, direction)\n self.flags = dict(border_type=BorderType.get(border_type), resample=Resample.get(resample))\n\n def apply_transform(\n self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n kernel_size: int = cast(int, params[\"ksize_factor\"].unique().item())\n angle = params[\"angle_factor\"]\n direction = params[\"direction_factor\"]\n return motion_blur(\n input,\n kernel_size,\n angle,\n direction,\n border_type=self.flags[\"border_type\"].name.lower(),\n mode=self.flags[\"resample\"].name.lower(),\n )\n", "path": "kornia/augmentation/_2d/intensity/motion_blur.py"}], "after_files": [{"content": "from typing import Dict, List, Optional, Tuple, Union, cast\n\nimport torch\n\nfrom kornia.augmentation import random_generator as rg\nfrom kornia.augmentation._2d.intensity.base import IntensityAugmentationBase2D\nfrom kornia.constants import BorderType, Resample\nfrom kornia.filters import motion_blur\n\n\nclass RandomMotionBlur(IntensityAugmentationBase2D):\n r\"\"\"Perform motion blur on 2D images (4D tensor).\n\n .. image:: _static/img/RandomMotionBlur.png\n\n Args:\n p: probability of applying the transformation.\n kernel_size: motion kernel size (odd and positive).\n If int, the kernel will have a fixed size.\n If Tuple[int, int], it will randomly generate the value from the range batch-wisely.\n angle: angle of the motion blur in degrees (anti-clockwise rotation).\n If float, it will generate the value from (-angle, angle).\n direction: forward/backward direction of the motion blur.\n Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle),\n while higher values towards 1.0 will point the motion blur forward. 
A value of 0.0 leads to a\n uniformly (but still angled) motion blur.\n If float, it will generate the value from (-direction, direction).\n If Tuple[int, int], it will randomly generate the value from the range.\n border_type: the padding mode to be applied before convolving.\n CONSTANT = 0, REFLECT = 1, REPLICATE = 2, CIRCULAR = 3.\n resample: the interpolation mode.\n keepdim: whether to keep the output shape the same as input (True) or broadcast it\n to the batch form (False).\n\n Shape:\n - Input: :math:`(C, H, W)` or :math:`(B, C, H, W)`, Optional: :math:`(B, 3, 3)`\n - Output: :math:`(B, C, H, W)`\n\n Note:\n Input tensor must be float and normalized into [0, 1] for the best differentiability support.\n Additionally, this function accepts another transformation tensor (:math:`(B, 3, 3)`), then the\n applied transformation will be merged int to the input transformation tensor and returned.\n\n Please set ``resample`` to ``'bilinear'`` if more meaningful gradients wanted.\n\n .. note::\n This function internally uses :func:`kornia.filters.motion_blur`.\n\n Examples:\n >>> rng = torch.manual_seed(0)\n >>> input = torch.ones(1, 1, 5, 5)\n >>> motion_blur = RandomMotionBlur(3, 35., 0.5, p=1.)\n >>> motion_blur(input)\n tensor([[[[0.5773, 1.0000, 1.0000, 1.0000, 0.7561],\n [0.5773, 1.0000, 1.0000, 1.0000, 0.7561],\n [0.5773, 1.0000, 1.0000, 1.0000, 0.7561],\n [0.5773, 1.0000, 1.0000, 1.0000, 0.7561],\n [0.5773, 1.0000, 1.0000, 1.0000, 0.7561]]]])\n\n To apply the exact augmenation again, you may take the advantage of the previous parameter state:\n >>> input = torch.randn(1, 3, 32, 32)\n >>> aug = RandomMotionBlur(3, 35., 0.5, p=1.)\n >>> (aug(input) == aug(input, params=aug._params)).all()\n tensor(True)\n \"\"\"\n\n def __init__(\n self,\n kernel_size: Union[int, Tuple[int, int]],\n angle: Union[torch.Tensor, float, Tuple[float, float]],\n direction: Union[torch.Tensor, float, Tuple[float, float]],\n border_type: Union[int, str, BorderType] = BorderType.CONSTANT.name,\n resample: Union[str, int, Resample] = Resample.NEAREST.name,\n return_transform: bool = False,\n same_on_batch: bool = False,\n p: float = 0.5,\n keepdim: bool = False,\n ) -> None:\n super().__init__(p=p, return_transform=return_transform, same_on_batch=same_on_batch, keepdim=keepdim)\n self._param_generator = rg.MotionBlurGenerator(kernel_size, angle, direction)\n self.flags = dict(border_type=BorderType.get(border_type), resample=Resample.get(resample))\n\n def apply_transform(\n self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None\n ) -> torch.Tensor:\n # sample a kernel size\n kernel_size_list: List[int] = params[\"ksize_factor\"].tolist()\n idx: int = cast(int, torch.randint(len(kernel_size_list), (1,)).item())\n return motion_blur(\n input,\n kernel_size=kernel_size_list[idx],\n angle=params[\"angle_factor\"],\n direction=params[\"direction_factor\"],\n border_type=self.flags[\"border_type\"].name.lower(),\n mode=self.flags[\"resample\"].name.lower(),\n )\n", "path": "kornia/augmentation/_2d/intensity/motion_blur.py"}]} | 2,554 | 328 |
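For reference, a short check of the behaviour the kornia patch above is meant to restore. It assumes a kornia build that already contains this fix and simply re-runs the reproduction from the issue:

```python
# Assumes kornia with the patched RandomMotionBlur.apply_transform is installed.
import torch
import kornia as K

x = torch.rand(3, 3, 224, 224)
tfm = K.augmentation.RandomMotionBlur(
    kernel_size=(7, 21), angle=(15.0, 15.0), direction=(-1.0, 1.0), p=1.0
)
out = tfm(x)  # previously raised "only one element tensors can be converted to Python scalars"
assert out.shape == x.shape
```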
gh_patches_debug_16402 | rasdani/github-patches | git_diff | fonttools__fonttools-717 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
hmtx code should round values
Currently, float values result in an exception:
```
self._writeTable(masterTable, writer, done)
File "/usr/local/lib/python2.7/dist-packages/fonttools-3.0-py2.7.egg/fontTools/ttLib/__init__.py", line 648, in _writeTable
tabledata = self.getTableData(tag)
File "/usr/local/lib/python2.7/dist-packages/fonttools-3.0-py2.7.egg/fontTools/ttLib/__init__.py", line 659, in getTableData
return self.tables[tag].compile(self)
File "/usr/local/lib/python2.7/dist-packages/fonttools-3.0-py2.7.egg/fontTools/ttLib/tables/_h_m_t_x.py", line 68, in compile
allMetrics = array.array("h", allMetrics)
TypeError: integer argument expected, got float
```
Possibly warn? Though it will become annoying if we do.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Lib/fontTools/ttLib/tables/_h_m_t_x.py`
Content:
```
1 from __future__ import print_function, division, absolute_import
2 from fontTools.misc.py23 import *
3 from fontTools import ttLib
4 from fontTools.misc.textTools import safeEval
5 from . import DefaultTable
6 import sys
7 import struct
8 import array
9 import logging
10
11
12 log = logging.getLogger(__name__)
13
14
15 class table__h_m_t_x(DefaultTable.DefaultTable):
16
17 headerTag = 'hhea'
18 advanceName = 'width'
19 sideBearingName = 'lsb'
20 numberOfMetricsName = 'numberOfHMetrics'
21 longMetricFormat = 'Hh'
22
23 def decompile(self, data, ttFont):
24 numGlyphs = ttFont['maxp'].numGlyphs
25 numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName))
26 if numberOfMetrics > numGlyphs:
27 log.warning("The %s.%s exceeds the maxp.numGlyphs" % (
28 self.headerTag, self.numberOfMetricsName))
29 numberOfMetrics = numGlyphs
30 if len(data) < 4 * numberOfMetrics:
31 raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag)
32 # Note: advanceWidth is unsigned, but some font editors might
33 # read/write as signed. We can't be sure whether it was a mistake
34 # or not, so we read as unsigned but also issue a warning...
35 metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
36 metrics = struct.unpack(metricsFmt, data[:4 * numberOfMetrics])
37 data = data[4 * numberOfMetrics:]
38 numberOfSideBearings = numGlyphs - numberOfMetrics
39 sideBearings = array.array("h", data[:2 * numberOfSideBearings])
40 data = data[2 * numberOfSideBearings:]
41
42 if sys.byteorder != "big":
43 sideBearings.byteswap()
44 if data:
45 log.warning("too much '%s' table data" % self.tableTag)
46 self.metrics = {}
47 glyphOrder = ttFont.getGlyphOrder()
48 for i in range(numberOfMetrics):
49 glyphName = glyphOrder[i]
50 advanceWidth, lsb = metrics[i*2:i*2+2]
51 if advanceWidth > 32767:
52 log.warning(
53 "Glyph %r has a huge advance %s (%d); is it intentional or "
54 "an (invalid) negative value?", glyphName, self.advanceName,
55 advanceWidth)
56 self.metrics[glyphName] = (advanceWidth, lsb)
57 lastAdvance = metrics[-2]
58 for i in range(numberOfSideBearings):
59 glyphName = glyphOrder[i + numberOfMetrics]
60 self.metrics[glyphName] = (lastAdvance, sideBearings[i])
61
62 def compile(self, ttFont):
63 metrics = []
64 hasNegativeAdvances = False
65 for glyphName in ttFont.getGlyphOrder():
66 advanceWidth, sideBearing = self.metrics[glyphName]
67 if advanceWidth < 0:
68 log.error("Glyph %r has negative advance %s" % (
69 glyphName, self.advanceName))
70 hasNegativeAdvances = True
71 metrics.append([advanceWidth, sideBearing])
72 lastAdvance = metrics[-1][0]
73 lastIndex = len(metrics)
74 while metrics[lastIndex-2][0] == lastAdvance:
75 lastIndex -= 1
76 if lastIndex <= 1:
77 # all advances are equal
78 lastIndex = 1
79 break
80 additionalMetrics = metrics[lastIndex:]
81 additionalMetrics = [sb for advance, sb in additionalMetrics]
82 metrics = metrics[:lastIndex]
83 numberOfMetrics = len(metrics)
84 setattr(ttFont[self.headerTag], self.numberOfMetricsName, numberOfMetrics)
85
86 allMetrics = []
87 for item in metrics:
88 allMetrics.extend(item)
89 metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
90 try:
91 data = struct.pack(metricsFmt, *allMetrics)
92 except struct.error as e:
93 if "out of range" in str(e) and hasNegativeAdvances:
94 raise ttLib.TTLibError(
95 "'%s' table can't contain negative advance %ss"
96 % (self.tableTag, self.advanceName))
97 else:
98 raise
99 additionalMetrics = array.array("h", additionalMetrics)
100 if sys.byteorder != "big":
101 additionalMetrics.byteswap()
102 data = data + additionalMetrics.tostring()
103 return data
104
105 def toXML(self, writer, ttFont):
106 names = sorted(self.metrics.keys())
107 for glyphName in names:
108 advance, sb = self.metrics[glyphName]
109 writer.simpletag("mtx", [
110 ("name", glyphName),
111 (self.advanceName, advance),
112 (self.sideBearingName, sb),
113 ])
114 writer.newline()
115
116 def fromXML(self, name, attrs, content, ttFont):
117 if not hasattr(self, "metrics"):
118 self.metrics = {}
119 if name == "mtx":
120 self.metrics[attrs["name"]] = (safeEval(attrs[self.advanceName]),
121 safeEval(attrs[self.sideBearingName]))
122
123 def __delitem__(self, glyphName):
124 del self.metrics[glyphName]
125
126 def __getitem__(self, glyphName):
127 return self.metrics[glyphName]
128
129 def __setitem__(self, glyphName, advance_sb_pair):
130 self.metrics[glyphName] = tuple(advance_sb_pair)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Lib/fontTools/ttLib/tables/_h_m_t_x.py b/Lib/fontTools/ttLib/tables/_h_m_t_x.py
--- a/Lib/fontTools/ttLib/tables/_h_m_t_x.py
+++ b/Lib/fontTools/ttLib/tables/_h_m_t_x.py
@@ -78,14 +78,14 @@
lastIndex = 1
break
additionalMetrics = metrics[lastIndex:]
- additionalMetrics = [sb for advance, sb in additionalMetrics]
+ additionalMetrics = [int(round(sb)) for _, sb in additionalMetrics]
metrics = metrics[:lastIndex]
numberOfMetrics = len(metrics)
setattr(ttFont[self.headerTag], self.numberOfMetricsName, numberOfMetrics)
allMetrics = []
- for item in metrics:
- allMetrics.extend(item)
+ for advance, sb in metrics:
+ allMetrics.extend([int(round(advance)), int(round(sb))])
metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
try:
data = struct.pack(metricsFmt, *allMetrics)
| {"golden_diff": "diff --git a/Lib/fontTools/ttLib/tables/_h_m_t_x.py b/Lib/fontTools/ttLib/tables/_h_m_t_x.py\n--- a/Lib/fontTools/ttLib/tables/_h_m_t_x.py\n+++ b/Lib/fontTools/ttLib/tables/_h_m_t_x.py\n@@ -78,14 +78,14 @@\n \t\t\t\tlastIndex = 1\n \t\t\t\tbreak\n \t\tadditionalMetrics = metrics[lastIndex:]\n-\t\tadditionalMetrics = [sb for advance, sb in additionalMetrics]\n+\t\tadditionalMetrics = [int(round(sb)) for _, sb in additionalMetrics]\n \t\tmetrics = metrics[:lastIndex]\n \t\tnumberOfMetrics = len(metrics)\n \t\tsetattr(ttFont[self.headerTag], self.numberOfMetricsName, numberOfMetrics)\n \n \t\tallMetrics = []\n-\t\tfor item in metrics:\n-\t\t\tallMetrics.extend(item)\n+\t\tfor advance, sb in metrics:\n+\t\t\tallMetrics.extend([int(round(advance)), int(round(sb))])\n \t\tmetricsFmt = \">\" + self.longMetricFormat * numberOfMetrics\n \t\ttry:\n \t\t\tdata = struct.pack(metricsFmt, *allMetrics)\n", "issue": "hmtx code should round values\nCurrently, float values result in exception:\n\n```\n self._writeTable(masterTable, writer, done)\n File \"/usr/local/lib/python2.7/dist-packages/fonttools-3.0-py2.7.egg/fontTools/ttLib/__init__.py\", line 648, in _writeTable\n tabledata = self.getTableData(tag)\n File \"/usr/local/lib/python2.7/dist-packages/fonttools-3.0-py2.7.egg/fontTools/ttLib/__init__.py\", line 659, in getTableData\n return self.tables[tag].compile(self)\n File \"/usr/local/lib/python2.7/dist-packages/fonttools-3.0-py2.7.egg/fontTools/ttLib/tables/_h_m_t_x.py\", line 68, in compile\n allMetrics = array.array(\"h\", allMetrics)\nTypeError: integer argument expected, got float\n```\n\nPossibly warn? Though it will become annoying if we do.\n\n", "before_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools import ttLib\nfrom fontTools.misc.textTools import safeEval\nfrom . import DefaultTable\nimport sys\nimport struct\nimport array\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\n\nclass table__h_m_t_x(DefaultTable.DefaultTable):\n\n\theaderTag = 'hhea'\n\tadvanceName = 'width'\n\tsideBearingName = 'lsb'\n\tnumberOfMetricsName = 'numberOfHMetrics'\n\tlongMetricFormat = 'Hh'\n\n\tdef decompile(self, data, ttFont):\n\t\tnumGlyphs = ttFont['maxp'].numGlyphs\n\t\tnumberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName))\n\t\tif numberOfMetrics > numGlyphs:\n\t\t\tlog.warning(\"The %s.%s exceeds the maxp.numGlyphs\" % (\n\t\t\t\tself.headerTag, self.numberOfMetricsName))\n\t\t\tnumberOfMetrics = numGlyphs\n\t\tif len(data) < 4 * numberOfMetrics:\n\t\t\traise ttLib.TTLibError(\"not enough '%s' table data\" % self.tableTag)\n\t\t# Note: advanceWidth is unsigned, but some font editors might\n\t\t# read/write as signed. 
We can't be sure whether it was a mistake\n\t\t# or not, so we read as unsigned but also issue a warning...\n\t\tmetricsFmt = \">\" + self.longMetricFormat * numberOfMetrics\n\t\tmetrics = struct.unpack(metricsFmt, data[:4 * numberOfMetrics])\n\t\tdata = data[4 * numberOfMetrics:]\n\t\tnumberOfSideBearings = numGlyphs - numberOfMetrics\n\t\tsideBearings = array.array(\"h\", data[:2 * numberOfSideBearings])\n\t\tdata = data[2 * numberOfSideBearings:]\n\n\t\tif sys.byteorder != \"big\":\n\t\t\tsideBearings.byteswap()\n\t\tif data:\n\t\t\tlog.warning(\"too much '%s' table data\" % self.tableTag)\n\t\tself.metrics = {}\n\t\tglyphOrder = ttFont.getGlyphOrder()\n\t\tfor i in range(numberOfMetrics):\n\t\t\tglyphName = glyphOrder[i]\n\t\t\tadvanceWidth, lsb = metrics[i*2:i*2+2]\n\t\t\tif advanceWidth > 32767:\n\t\t\t\tlog.warning(\n\t\t\t\t\t\"Glyph %r has a huge advance %s (%d); is it intentional or \"\n\t\t\t\t\t\"an (invalid) negative value?\", glyphName, self.advanceName,\n\t\t\t\t\tadvanceWidth)\n\t\t\tself.metrics[glyphName] = (advanceWidth, lsb)\n\t\tlastAdvance = metrics[-2]\n\t\tfor i in range(numberOfSideBearings):\n\t\t\tglyphName = glyphOrder[i + numberOfMetrics]\n\t\t\tself.metrics[glyphName] = (lastAdvance, sideBearings[i])\n\n\tdef compile(self, ttFont):\n\t\tmetrics = []\n\t\thasNegativeAdvances = False\n\t\tfor glyphName in ttFont.getGlyphOrder():\n\t\t\tadvanceWidth, sideBearing = self.metrics[glyphName]\n\t\t\tif advanceWidth < 0:\n\t\t\t\tlog.error(\"Glyph %r has negative advance %s\" % (\n\t\t\t\t\tglyphName, self.advanceName))\n\t\t\t\thasNegativeAdvances = True\n\t\t\tmetrics.append([advanceWidth, sideBearing])\n\t\tlastAdvance = metrics[-1][0]\n\t\tlastIndex = len(metrics)\n\t\twhile metrics[lastIndex-2][0] == lastAdvance:\n\t\t\tlastIndex -= 1\n\t\t\tif lastIndex <= 1:\n\t\t\t\t# all advances are equal\n\t\t\t\tlastIndex = 1\n\t\t\t\tbreak\n\t\tadditionalMetrics = metrics[lastIndex:]\n\t\tadditionalMetrics = [sb for advance, sb in additionalMetrics]\n\t\tmetrics = metrics[:lastIndex]\n\t\tnumberOfMetrics = len(metrics)\n\t\tsetattr(ttFont[self.headerTag], self.numberOfMetricsName, numberOfMetrics)\n\n\t\tallMetrics = []\n\t\tfor item in metrics:\n\t\t\tallMetrics.extend(item)\n\t\tmetricsFmt = \">\" + self.longMetricFormat * numberOfMetrics\n\t\ttry:\n\t\t\tdata = struct.pack(metricsFmt, *allMetrics)\n\t\texcept struct.error as e:\n\t\t\tif \"out of range\" in str(e) and hasNegativeAdvances:\n\t\t\t\traise ttLib.TTLibError(\n\t\t\t\t\t\"'%s' table can't contain negative advance %ss\"\n\t\t\t\t\t% (self.tableTag, self.advanceName))\n\t\t\telse:\n\t\t\t\traise\n\t\tadditionalMetrics = array.array(\"h\", additionalMetrics)\n\t\tif sys.byteorder != \"big\":\n\t\t\tadditionalMetrics.byteswap()\n\t\tdata = data + additionalMetrics.tostring()\n\t\treturn data\n\n\tdef toXML(self, writer, ttFont):\n\t\tnames = sorted(self.metrics.keys())\n\t\tfor glyphName in names:\n\t\t\tadvance, sb = self.metrics[glyphName]\n\t\t\twriter.simpletag(\"mtx\", [\n\t\t\t\t\t(\"name\", glyphName),\n\t\t\t\t\t(self.advanceName, advance),\n\t\t\t\t\t(self.sideBearingName, sb),\n\t\t\t\t\t])\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif not hasattr(self, \"metrics\"):\n\t\t\tself.metrics = {}\n\t\tif name == \"mtx\":\n\t\t\tself.metrics[attrs[\"name\"]] = (safeEval(attrs[self.advanceName]),\n\t\t\t\t\tsafeEval(attrs[self.sideBearingName]))\n\n\tdef __delitem__(self, glyphName):\n\t\tdel self.metrics[glyphName]\n\n\tdef __getitem__(self, glyphName):\n\t\treturn 
self.metrics[glyphName]\n\n\tdef __setitem__(self, glyphName, advance_sb_pair):\n\t\tself.metrics[glyphName] = tuple(advance_sb_pair)\n", "path": "Lib/fontTools/ttLib/tables/_h_m_t_x.py"}], "after_files": [{"content": "from __future__ import print_function, division, absolute_import\nfrom fontTools.misc.py23 import *\nfrom fontTools import ttLib\nfrom fontTools.misc.textTools import safeEval\nfrom . import DefaultTable\nimport sys\nimport struct\nimport array\nimport logging\n\n\nlog = logging.getLogger(__name__)\n\n\nclass table__h_m_t_x(DefaultTable.DefaultTable):\n\n\theaderTag = 'hhea'\n\tadvanceName = 'width'\n\tsideBearingName = 'lsb'\n\tnumberOfMetricsName = 'numberOfHMetrics'\n\tlongMetricFormat = 'Hh'\n\n\tdef decompile(self, data, ttFont):\n\t\tnumGlyphs = ttFont['maxp'].numGlyphs\n\t\tnumberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName))\n\t\tif numberOfMetrics > numGlyphs:\n\t\t\tlog.warning(\"The %s.%s exceeds the maxp.numGlyphs\" % (\n\t\t\t\tself.headerTag, self.numberOfMetricsName))\n\t\t\tnumberOfMetrics = numGlyphs\n\t\tif len(data) < 4 * numberOfMetrics:\n\t\t\traise ttLib.TTLibError(\"not enough '%s' table data\" % self.tableTag)\n\t\t# Note: advanceWidth is unsigned, but some font editors might\n\t\t# read/write as signed. We can't be sure whether it was a mistake\n\t\t# or not, so we read as unsigned but also issue a warning...\n\t\tmetricsFmt = \">\" + self.longMetricFormat * numberOfMetrics\n\t\tmetrics = struct.unpack(metricsFmt, data[:4 * numberOfMetrics])\n\t\tdata = data[4 * numberOfMetrics:]\n\t\tnumberOfSideBearings = numGlyphs - numberOfMetrics\n\t\tsideBearings = array.array(\"h\", data[:2 * numberOfSideBearings])\n\t\tdata = data[2 * numberOfSideBearings:]\n\n\t\tif sys.byteorder != \"big\":\n\t\t\tsideBearings.byteswap()\n\t\tif data:\n\t\t\tlog.warning(\"too much '%s' table data\" % self.tableTag)\n\t\tself.metrics = {}\n\t\tglyphOrder = ttFont.getGlyphOrder()\n\t\tfor i in range(numberOfMetrics):\n\t\t\tglyphName = glyphOrder[i]\n\t\t\tadvanceWidth, lsb = metrics[i*2:i*2+2]\n\t\t\tif advanceWidth > 32767:\n\t\t\t\tlog.warning(\n\t\t\t\t\t\"Glyph %r has a huge advance %s (%d); is it intentional or \"\n\t\t\t\t\t\"an (invalid) negative value?\", glyphName, self.advanceName,\n\t\t\t\t\tadvanceWidth)\n\t\t\tself.metrics[glyphName] = (advanceWidth, lsb)\n\t\tlastAdvance = metrics[-2]\n\t\tfor i in range(numberOfSideBearings):\n\t\t\tglyphName = glyphOrder[i + numberOfMetrics]\n\t\t\tself.metrics[glyphName] = (lastAdvance, sideBearings[i])\n\n\tdef compile(self, ttFont):\n\t\tmetrics = []\n\t\thasNegativeAdvances = False\n\t\tfor glyphName in ttFont.getGlyphOrder():\n\t\t\tadvanceWidth, sideBearing = self.metrics[glyphName]\n\t\t\tif advanceWidth < 0:\n\t\t\t\tlog.error(\"Glyph %r has negative advance %s\" % (\n\t\t\t\t\tglyphName, self.advanceName))\n\t\t\t\thasNegativeAdvances = True\n\t\t\tmetrics.append([advanceWidth, sideBearing])\n\t\tlastAdvance = metrics[-1][0]\n\t\tlastIndex = len(metrics)\n\t\twhile metrics[lastIndex-2][0] == lastAdvance:\n\t\t\tlastIndex -= 1\n\t\t\tif lastIndex <= 1:\n\t\t\t\t# all advances are equal\n\t\t\t\tlastIndex = 1\n\t\t\t\tbreak\n\t\tadditionalMetrics = metrics[lastIndex:]\n\t\tadditionalMetrics = [int(round(sb)) for _, sb in additionalMetrics]\n\t\tmetrics = metrics[:lastIndex]\n\t\tnumberOfMetrics = len(metrics)\n\t\tsetattr(ttFont[self.headerTag], self.numberOfMetricsName, numberOfMetrics)\n\n\t\tallMetrics = []\n\t\tfor advance, sb in metrics:\n\t\t\tallMetrics.extend([int(round(advance)), 
int(round(sb))])\n\t\tmetricsFmt = \">\" + self.longMetricFormat * numberOfMetrics\n\t\ttry:\n\t\t\tdata = struct.pack(metricsFmt, *allMetrics)\n\t\texcept struct.error as e:\n\t\t\tif \"out of range\" in str(e) and hasNegativeAdvances:\n\t\t\t\traise ttLib.TTLibError(\n\t\t\t\t\t\"'%s' table can't contain negative advance %ss\"\n\t\t\t\t\t% (self.tableTag, self.advanceName))\n\t\t\telse:\n\t\t\t\traise\n\t\tadditionalMetrics = array.array(\"h\", additionalMetrics)\n\t\tif sys.byteorder != \"big\":\n\t\t\tadditionalMetrics.byteswap()\n\t\tdata = data + additionalMetrics.tostring()\n\t\treturn data\n\n\tdef toXML(self, writer, ttFont):\n\t\tnames = sorted(self.metrics.keys())\n\t\tfor glyphName in names:\n\t\t\tadvance, sb = self.metrics[glyphName]\n\t\t\twriter.simpletag(\"mtx\", [\n\t\t\t\t\t(\"name\", glyphName),\n\t\t\t\t\t(self.advanceName, advance),\n\t\t\t\t\t(self.sideBearingName, sb),\n\t\t\t\t\t])\n\t\t\twriter.newline()\n\n\tdef fromXML(self, name, attrs, content, ttFont):\n\t\tif not hasattr(self, \"metrics\"):\n\t\t\tself.metrics = {}\n\t\tif name == \"mtx\":\n\t\t\tself.metrics[attrs[\"name\"]] = (safeEval(attrs[self.advanceName]),\n\t\t\t\t\tsafeEval(attrs[self.sideBearingName]))\n\n\tdef __delitem__(self, glyphName):\n\t\tdel self.metrics[glyphName]\n\n\tdef __getitem__(self, glyphName):\n\t\treturn self.metrics[glyphName]\n\n\tdef __setitem__(self, glyphName, advance_sb_pair):\n\t\tself.metrics[glyphName] = tuple(advance_sb_pair)\n", "path": "Lib/fontTools/ttLib/tables/_h_m_t_x.py"}]} | 1,972 | 248 |
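A small illustration of the rounding introduced by the fontTools patch above. The metric values are made up; the point is only that the `Hh` struct format rejects floats, so advances and side bearings must be rounded to ints before packing:

```python
# Hypothetical (advanceWidth, lsb) pairs; floats can appear e.g. after interpolation math.
import struct

metrics = [(520.4, 12.6), (600.0, -3.2)]
packed = struct.pack(
    ">" + "Hh" * len(metrics),
    *[int(round(v)) for pair in metrics for v in pair],  # mirrors int(round(...)) in the diff
)
assert len(packed) == 4 * len(metrics)  # 4 bytes per longHorMetric record
```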
gh_patches_debug_5813 | rasdani/github-patches | git_diff | saleor__saleor-2087 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing variable in "fulfillment" email
Two small issues in the "fulfillment" email:
- the logo is missing
- a template variable is missing in the footer
I've just tested it, and this is what the email looks like:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/order/emails.py`
Content:
```
1 from celery import shared_task
2 from django.conf import settings
3 from django.contrib.sites.models import Site
4 from django.urls import reverse
5 from templated_email import send_templated_mail
6
7 from ..core.utils import build_absolute_uri
8 from ..seo.schema.email import get_order_confirmation_markup
9 from .models import Fulfillment, Order
10
11 CONFIRM_ORDER_TEMPLATE = 'source/order/confirm_order'
12 CONFIRM_FULFILLMENT_TEMPLATE = 'source/order/confirm_fulfillment'
13 UPDATE_FULFILLMENT_TEMPLATE = 'source/order/update_fulfillment'
14 CONFIRM_PAYMENT_TEMPLATE = 'source/order/payment/confirm_payment'
15 CONFIRM_NOTE_TEMPLATE = 'source/order/note/confirm_note'
16
17
18 def get_email_context(order_token):
19 """Prepares context required for email template rendering."""
20 site = Site.objects.get_current()
21 order_url = build_absolute_uri(
22 reverse('order:details', kwargs={'token': order_token}))
23 ctx = {
24 'protocol': 'https' if settings.ENABLE_SSL else 'http',
25 'site_name': site.name,
26 'domain': site.domain,
27 'url': order_url}
28 return ctx
29
30
31 def collect_data_for_email(order_pk, template):
32 """Collects data required for email sending.
33
34 Args:
35 order_pk (int): order primary key
36 template (str): email template path
37 """
38 order = Order.objects.get(pk=order_pk)
39 recipient_email = order.get_user_current_email()
40 email_context = get_email_context(order.token)
41
42 # Order confirmation template requires additional information
43 if template == CONFIRM_ORDER_TEMPLATE:
44 email_markup = get_order_confirmation_markup(order)
45 email_context.update(
46 {'order': order, 'schema_markup': email_markup})
47
48 return {
49 'recipient_list': [recipient_email], 'template_name': template,
50 'context': email_context, 'from_email': settings.ORDER_FROM_EMAIL}
51
52
53 def collect_data_for_fullfillment_email(order_pk, template, fulfillment_pk):
54 fulfillment = Fulfillment.objects.get(pk=fulfillment_pk)
55 email_data = collect_data_for_email(order_pk, template)
56 email_data.update({'context': {'fulfillment': fulfillment}})
57 return email_data
58
59
60 @shared_task
61 def send_order_confirmation(order_pk):
62 """Sends order confirmation email."""
63 email_data = collect_data_for_email(order_pk, CONFIRM_ORDER_TEMPLATE)
64 send_templated_mail(**email_data)
65
66
67 @shared_task
68 def send_fulfillment_confirmation(order_pk, fulfillment_pk):
69 email_data = collect_data_for_fullfillment_email(
70 order_pk, CONFIRM_FULFILLMENT_TEMPLATE, fulfillment_pk)
71 send_templated_mail(**email_data)
72
73
74 @shared_task
75 def send_fulfillment_update(order_pk, fulfillment_pk):
76 email_data = collect_data_for_fullfillment_email(
77 order_pk, UPDATE_FULFILLMENT_TEMPLATE, fulfillment_pk)
78 send_templated_mail(**email_data)
79
80
81 @shared_task
82 def send_payment_confirmation(order_pk):
83 """Sends payment confirmation email."""
84 email_data = collect_data_for_email(order_pk, CONFIRM_PAYMENT_TEMPLATE)
85 send_templated_mail(**email_data)
86
87
88 @shared_task
89 def send_note_confirmation(order_pk):
90 """Notifies customer, when new note was added to an order."""
91 email_data = collect_data_for_email(order_pk, CONFIRM_NOTE_TEMPLATE)
92 send_templated_mail(**email_data)
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/order/emails.py b/saleor/order/emails.py
--- a/saleor/order/emails.py
+++ b/saleor/order/emails.py
@@ -53,7 +53,7 @@
def collect_data_for_fullfillment_email(order_pk, template, fulfillment_pk):
fulfillment = Fulfillment.objects.get(pk=fulfillment_pk)
email_data = collect_data_for_email(order_pk, template)
- email_data.update({'context': {'fulfillment': fulfillment}})
+ email_data['context'].update({'fulfillment': fulfillment})
return email_data
| {"golden_diff": "diff --git a/saleor/order/emails.py b/saleor/order/emails.py\n--- a/saleor/order/emails.py\n+++ b/saleor/order/emails.py\n@@ -53,7 +53,7 @@\n def collect_data_for_fullfillment_email(order_pk, template, fulfillment_pk):\n fulfillment = Fulfillment.objects.get(pk=fulfillment_pk)\n email_data = collect_data_for_email(order_pk, template)\n- email_data.update({'context': {'fulfillment': fulfillment}})\n+ email_data['context'].update({'fulfillment': fulfillment})\n return email_data\n", "issue": "Missing variable in \"fulfillment\" email\nTwo small issues in the \"fulfillment\" email:\r\n- logo is missing\r\n- in footer there is missing template variable\r\n\r\nI've just tested it and this is how the email looks like:\r\n\r\n\r\n\n", "before_files": [{"content": "from celery import shared_task\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.urls import reverse\nfrom templated_email import send_templated_mail\n\nfrom ..core.utils import build_absolute_uri\nfrom ..seo.schema.email import get_order_confirmation_markup\nfrom .models import Fulfillment, Order\n\nCONFIRM_ORDER_TEMPLATE = 'source/order/confirm_order'\nCONFIRM_FULFILLMENT_TEMPLATE = 'source/order/confirm_fulfillment'\nUPDATE_FULFILLMENT_TEMPLATE = 'source/order/update_fulfillment'\nCONFIRM_PAYMENT_TEMPLATE = 'source/order/payment/confirm_payment'\nCONFIRM_NOTE_TEMPLATE = 'source/order/note/confirm_note'\n\n\ndef get_email_context(order_token):\n \"\"\"Prepares context required for email template rendering.\"\"\"\n site = Site.objects.get_current()\n order_url = build_absolute_uri(\n reverse('order:details', kwargs={'token': order_token}))\n ctx = {\n 'protocol': 'https' if settings.ENABLE_SSL else 'http',\n 'site_name': site.name,\n 'domain': site.domain,\n 'url': order_url}\n return ctx\n\n\ndef collect_data_for_email(order_pk, template):\n \"\"\"Collects data required for email sending.\n\n Args:\n order_pk (int): order primary key\n template (str): email template path\n \"\"\"\n order = Order.objects.get(pk=order_pk)\n recipient_email = order.get_user_current_email()\n email_context = get_email_context(order.token)\n\n # Order confirmation template requires additional information\n if template == CONFIRM_ORDER_TEMPLATE:\n email_markup = get_order_confirmation_markup(order)\n email_context.update(\n {'order': order, 'schema_markup': email_markup})\n\n return {\n 'recipient_list': [recipient_email], 'template_name': template,\n 'context': email_context, 'from_email': settings.ORDER_FROM_EMAIL}\n\n\ndef collect_data_for_fullfillment_email(order_pk, template, fulfillment_pk):\n fulfillment = Fulfillment.objects.get(pk=fulfillment_pk)\n email_data = collect_data_for_email(order_pk, template)\n email_data.update({'context': {'fulfillment': fulfillment}})\n return email_data\n\n\n@shared_task\ndef send_order_confirmation(order_pk):\n \"\"\"Sends order confirmation email.\"\"\"\n email_data = collect_data_for_email(order_pk, CONFIRM_ORDER_TEMPLATE)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_fulfillment_confirmation(order_pk, fulfillment_pk):\n email_data = collect_data_for_fullfillment_email(\n order_pk, CONFIRM_FULFILLMENT_TEMPLATE, fulfillment_pk)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_fulfillment_update(order_pk, fulfillment_pk):\n email_data = collect_data_for_fullfillment_email(\n order_pk, UPDATE_FULFILLMENT_TEMPLATE, fulfillment_pk)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef 
send_payment_confirmation(order_pk):\n \"\"\"Sends payment confirmation email.\"\"\"\n email_data = collect_data_for_email(order_pk, CONFIRM_PAYMENT_TEMPLATE)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_note_confirmation(order_pk):\n \"\"\"Notifies customer, when new note was added to an order.\"\"\"\n email_data = collect_data_for_email(order_pk, CONFIRM_NOTE_TEMPLATE)\n send_templated_mail(**email_data)\n", "path": "saleor/order/emails.py"}], "after_files": [{"content": "from celery import shared_task\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.urls import reverse\nfrom templated_email import send_templated_mail\n\nfrom ..core.utils import build_absolute_uri\nfrom ..seo.schema.email import get_order_confirmation_markup\nfrom .models import Fulfillment, Order\n\nCONFIRM_ORDER_TEMPLATE = 'source/order/confirm_order'\nCONFIRM_FULFILLMENT_TEMPLATE = 'source/order/confirm_fulfillment'\nUPDATE_FULFILLMENT_TEMPLATE = 'source/order/update_fulfillment'\nCONFIRM_PAYMENT_TEMPLATE = 'source/order/payment/confirm_payment'\nCONFIRM_NOTE_TEMPLATE = 'source/order/note/confirm_note'\n\n\ndef get_email_context(order_token):\n \"\"\"Prepares context required for email template rendering.\"\"\"\n site = Site.objects.get_current()\n order_url = build_absolute_uri(\n reverse('order:details', kwargs={'token': order_token}))\n ctx = {\n 'protocol': 'https' if settings.ENABLE_SSL else 'http',\n 'site_name': site.name,\n 'domain': site.domain,\n 'url': order_url}\n return ctx\n\n\ndef collect_data_for_email(order_pk, template):\n \"\"\"Collects data required for email sending.\n\n Args:\n order_pk (int): order primary key\n template (str): email template path\n \"\"\"\n order = Order.objects.get(pk=order_pk)\n recipient_email = order.get_user_current_email()\n email_context = get_email_context(order.token)\n\n # Order confirmation template requires additional information\n if template == CONFIRM_ORDER_TEMPLATE:\n email_markup = get_order_confirmation_markup(order)\n email_context.update(\n {'order': order, 'schema_markup': email_markup})\n\n return {\n 'recipient_list': [recipient_email], 'template_name': template,\n 'context': email_context, 'from_email': settings.ORDER_FROM_EMAIL}\n\n\ndef collect_data_for_fullfillment_email(order_pk, template, fulfillment_pk):\n fulfillment = Fulfillment.objects.get(pk=fulfillment_pk)\n email_data = collect_data_for_email(order_pk, template)\n email_data['context'].update({'fulfillment': fulfillment})\n return email_data\n\n\n@shared_task\ndef send_order_confirmation(order_pk):\n \"\"\"Sends order confirmation email.\"\"\"\n email_data = collect_data_for_email(order_pk, CONFIRM_ORDER_TEMPLATE)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_fulfillment_confirmation(order_pk, fulfillment_pk):\n email_data = collect_data_for_fullfillment_email(\n order_pk, CONFIRM_FULFILLMENT_TEMPLATE, fulfillment_pk)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_fulfillment_update(order_pk, fulfillment_pk):\n email_data = collect_data_for_fullfillment_email(\n order_pk, UPDATE_FULFILLMENT_TEMPLATE, fulfillment_pk)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_payment_confirmation(order_pk):\n \"\"\"Sends payment confirmation email.\"\"\"\n email_data = collect_data_for_email(order_pk, CONFIRM_PAYMENT_TEMPLATE)\n send_templated_mail(**email_data)\n\n\n@shared_task\ndef send_note_confirmation(order_pk):\n \"\"\"Notifies customer, when new note was added to an order.\"\"\"\n 
email_data = collect_data_for_email(order_pk, CONFIRM_NOTE_TEMPLATE)\n send_templated_mail(**email_data)\n", "path": "saleor/order/emails.py"}]} | 1,277 | 131 |
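The patch in the row above hinges on the difference between replacing a nested dict and merging into it. A minimal standalone sketch, using plain dicts and a placeholder object in place of the real Saleor email context and fulfillment (both assumptions here), shows why the merged form keeps the previously collected template keys:

```python
# Hypothetical stand-ins; only the dict behaviour is the point.
email_data = {"context": {"site_name": "example", "url": "https://example.com/order/1"}}
fulfillment = object()

# Pattern from before_files: the whole "context" value is swapped out,
# so site_name and url never reach the template.
buggy = {"context": dict(email_data["context"])}
buggy.update({"context": {"fulfillment": fulfillment}})
assert "site_name" not in buggy["context"]

# Pattern from the golden diff: merge into the existing context instead.
fixed = {"context": dict(email_data["context"])}
fixed["context"].update({"fulfillment": fulfillment})
assert fixed["context"]["site_name"] == "example"
```

Losing the previously collected context keys in this way is consistent with the missing logo and unresolved footer variable reported in the issue.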
gh_patches_debug_728 | rasdani/github-patches | git_diff | speechbrain__speechbrain-1504 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Torch 1.12 not compatible?
working to install speechbrain 0.5.12, and getting the error that "speechbrain 0.5.12 requires torch<=1.11,>=1.7, but you have torch 1.12.0 which is incompatible." read elsewhere that it should work with >=1.7.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python3
2 import os
3 import sys
4 import site
5 import setuptools
6 from distutils.core import setup
7
8
9 # Editable install in user site directory can be allowed with this hack:
10 # https://github.com/pypa/pip/issues/7953.
11 site.ENABLE_USER_SITE = "--user" in sys.argv[1:]
12
13 with open("README.md") as f:
14 long_description = f.read()
15
16 with open(os.path.join("speechbrain", "version.txt")) as f:
17 version = f.read().strip()
18
19 setup(
20 name="speechbrain",
21 version=version,
22 description="All-in-one speech toolkit in pure Python and Pytorch",
23 long_description=long_description,
24 long_description_content_type="text/markdown",
25 author="Mirco Ravanelli & Others",
26 author_email="[email protected]",
27 classifiers=[
28 "Programming Language :: Python :: 3",
29 "License :: OSI Approved :: Apache Software License",
30 ],
31 packages=setuptools.find_packages(),
32 package_data={"speechbrain": ["version.txt", "log-config.yaml"]},
33 install_requires=[
34 "hyperpyyaml",
35 "joblib",
36 "numpy",
37 "packaging",
38 "scipy",
39 "sentencepiece",
40 "torch>=1.7,<=1.11",
41 "torchaudio",
42 "tqdm",
43 "huggingface_hub",
44 ],
45 python_requires=">=3.7",
46 url="https://speechbrain.github.io/",
47 )
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@
"packaging",
"scipy",
"sentencepiece",
- "torch>=1.7,<=1.11",
+ "torch>=1.9",
"torchaudio",
"tqdm",
"huggingface_hub",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,7 @@\n \"packaging\",\n \"scipy\",\n \"sentencepiece\",\n- \"torch>=1.7,<=1.11\",\n+ \"torch>=1.9\",\n \"torchaudio\",\n \"tqdm\",\n \"huggingface_hub\",\n", "issue": "Torch 1.12 not compatible?\nworking to install speechbrain 0.5.12, and getting the error that \"speechbrain 0.5.12 requires torch<=1.11,>=1.7, but you have torch 1.12.0 which is incompatible.\" read elsewhere that it should work with >=1.7. \n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nimport sys\nimport site\nimport setuptools\nfrom distutils.core import setup\n\n\n# Editable install in user site directory can be allowed with this hack:\n# https://github.com/pypa/pip/issues/7953.\nsite.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nwith open(os.path.join(\"speechbrain\", \"version.txt\")) as f:\n version = f.read().strip()\n\nsetup(\n name=\"speechbrain\",\n version=version,\n description=\"All-in-one speech toolkit in pure Python and Pytorch\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Mirco Ravanelli & Others\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n packages=setuptools.find_packages(),\n package_data={\"speechbrain\": [\"version.txt\", \"log-config.yaml\"]},\n install_requires=[\n \"hyperpyyaml\",\n \"joblib\",\n \"numpy\",\n \"packaging\",\n \"scipy\",\n \"sentencepiece\",\n \"torch>=1.7,<=1.11\",\n \"torchaudio\",\n \"tqdm\",\n \"huggingface_hub\",\n ],\n python_requires=\">=3.7\",\n url=\"https://speechbrain.github.io/\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport os\nimport sys\nimport site\nimport setuptools\nfrom distutils.core import setup\n\n\n# Editable install in user site directory can be allowed with this hack:\n# https://github.com/pypa/pip/issues/7953.\nsite.ENABLE_USER_SITE = \"--user\" in sys.argv[1:]\n\nwith open(\"README.md\") as f:\n long_description = f.read()\n\nwith open(os.path.join(\"speechbrain\", \"version.txt\")) as f:\n version = f.read().strip()\n\nsetup(\n name=\"speechbrain\",\n version=version,\n description=\"All-in-one speech toolkit in pure Python and Pytorch\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Mirco Ravanelli & Others\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: Apache Software License\",\n ],\n packages=setuptools.find_packages(),\n package_data={\"speechbrain\": [\"version.txt\", \"log-config.yaml\"]},\n install_requires=[\n \"hyperpyyaml\",\n \"joblib\",\n \"numpy\",\n \"packaging\",\n \"scipy\",\n \"sentencepiece\",\n \"torch>=1.9\",\n \"torchaudio\",\n \"tqdm\",\n \"huggingface_hub\",\n ],\n python_requires=\">=3.7\",\n url=\"https://speechbrain.github.io/\",\n)\n", "path": "setup.py"}]} | 745 | 90 |
gh_patches_debug_41349 | rasdani/github-patches | git_diff | privacyidea__privacyidea-2589 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migration fails when skipping the 3.3 version
In v3.3 the dedicated admin user was added to the policy table and the corresponding data was migrated (https://github.com/privacyidea/privacyidea/blob/master/migrations/versions/a7e91b18a460_.py).
But if the migration skips the 3.3 version, the data migration fails because the [`models.py`](https://github.com/privacyidea/privacyidea/blob/master/privacyidea/models.py) describes a different version of the database than the physical version.
By using a temporary policy table description in the migration script we can mitigate this failure.
This applies to privacyIDEA version 3.4 and up.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `migrations/versions/a7e91b18a460_.py`
Content:
```
1 """add dedicated adminuser to policies
2
3 Revision ID: a7e91b18a460
4 Revises: 0c7123345224
5 Create Date: 2020-01-29 13:42:15.390923
6
7 """
8
9 # revision identifiers, used by Alembic.
10 revision = 'a7e91b18a460'
11 down_revision = '0c7123345224'
12
13 from alembic import op
14 import sqlalchemy as sa
15 from privacyidea.models import Policy
16 from sqlalchemy import orm
17
18
19 def upgrade():
20 try:
21 op.add_column('policy', sa.Column('adminuser', sa.Unicode(length=256), nullable=True))
22 except Exception as exx:
23 print('Adding of column "adminuser" in table policy failed: {!r}'.format(exx))
24 print('This is expected behavior if this column already exists.')
25
26 # Now that we added the column in the table, we can move the "user" from admin-policies to
27 # the "adminuser" column
28
29 try:
30 bind = op.get_bind()
31 session = orm.Session(bind=bind)
32 pol_name = None
33 for policy in session.query(Policy).filter(Policy.user != "", Policy.scope == "admin"):
34 pol_name = policy.name
35 # move the "user" to the "adminuser"
36 policy.adminuser = policy.user
37 policy.user = u""
38 session.commit()
39 except Exception as exx:
40 session.rollback()
41 print("Failed to migrate column adminuser in policies due to error in policy '{0!s}'.".format(pol_name))
42 print(exx)
43
44
45 def downgrade():
46 op.drop_column('policy', 'adminuser')
47
```
Path: `privacyidea/lib/auditmodules/containeraudit.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # 2019-11-07 Cornelius Kölbel <[email protected]>
4 # initial code for writing audit information to a file
5 #
6 # This code is free software; you can redistribute it and/or
7 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
8 # License as published by the Free Software Foundation; either
9 # version 3 of the License, or any later version.
10 #
11 # This code is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public
17 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 #
20 __doc__ = """The Container Audit Module allows to write audit information to several different
21 audit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the
22 Logger Audit Module. This way audit information can be saved in the SQL database and at the same time
23 be passed to a file or external services via the Python logging facility.
24
25 The Container Audit Module is configured like this:
26
27 PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit'
28 PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit']
29 PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit'
30
31 You also have to provide the configuration parameters for the referenced audit modules.
32
33 """
34
35 import logging
36 from privacyidea.lib.auditmodules.base import (Audit as AuditBase)
37 from privacyidea.lib.utils import get_module_class
38
39
40 log = logging.getLogger(__name__)
41
42
43 class Audit(AuditBase):
44 """
45 This is the ContainerAudit module, which writes the audit entries
46 to a list of audit modules.
47 """
48
49 def __init__(self, config=None):
50 super(Audit, self).__init__(config)
51 self.name = "containeraudit"
52 write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')
53 read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')
54 # Initialize all modules
55 self.write_modules = [get_module_class(audit_module, "Audit", "log")(config) for audit_module in write_conf]
56 self.read_module = get_module_class(read_conf, "Audit", "log")(config)
57 if not self.read_module.is_readable:
58 log.warning(u"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.".format(self.read_module))
59
60 @property
61 def has_data(self):
62 return any([x.has_data for x in self.write_modules])
63
64 def log(self, param):
65 """
66 Call the log method for all writeable modules
67 """
68 for module in self.write_modules:
69 module.log(param)
70
71 def add_to_log(self, param, add_with_comma=False):
72 """
73 Call the add_to_log method for all writeable modules
74 """
75 for module in self.write_modules:
76 module.add_to_log(param, add_with_comma)
77
78 def add_policy(self, policyname):
79 """
80 Call the add_policy method for all writeable modules
81 """
82 for module in self.write_modules:
83 module.add_policy(policyname)
84
85 def search(self, search_dict, page_size=15, page=1, sortorder="asc",
86 timelimit=None):
87 """
88 Call the search method for the one readable module
89 """
90 return self.read_module.search(search_dict, page_size=page_size, page=page,
91 sortorder=sortorder, timelimit=timelimit)
92
93 def get_count(self, search_dict, timedelta=None, success=None):
94 """
95 Call the count method for the one readable module
96 """
97 return self.read_module.get_count(search_dict, timedelta=timedelta, success=success)
98
99 def csv_generator(self, param=None, user=None, timelimit=None):
100 """
101 Call the csv_generator method for the one readable module
102 """
103 return self.read_module.csv_generator(param=param, user=user,
104 timelimit=timelimit)
105
106 def get_total(self, param, AND=True, display_error=True, timelimit=None):
107 """
108 Call the total method for the one readable module
109 """
110 return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit)
111
112 def finalize_log(self):
113 """
114 Call the finalize method of all writeable audit modules
115 """
116 for module in self.write_modules:
117 module.finalize_log()
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/migrations/versions/a7e91b18a460_.py b/migrations/versions/a7e91b18a460_.py
--- a/migrations/versions/a7e91b18a460_.py
+++ b/migrations/versions/a7e91b18a460_.py
@@ -12,8 +12,31 @@
from alembic import op
import sqlalchemy as sa
-from privacyidea.models import Policy
from sqlalchemy import orm
+from sqlalchemy.ext.declarative import declarative_base
+
+Base = declarative_base()
+
+
+# Use table definition from the point where the changes took place. If we use
+# the table definition from models.py we might run into problems
+class Policy(Base):
+ __tablename__ = "policy"
+ __table_args__ = {'mysql_row_format': 'DYNAMIC'}
+ id = sa.Column(sa.Integer, sa.Sequence("policy_seq"), primary_key=True)
+ active = sa.Column(sa.Boolean, default=True)
+ check_all_resolvers = sa.Column(sa.Boolean, default=False)
+ name = sa.Column(sa.Unicode(64), unique=True, nullable=False)
+ scope = sa.Column(sa.Unicode(32), nullable=False)
+ action = sa.Column(sa.Unicode(2000), default=u"")
+ realm = sa.Column(sa.Unicode(256), default=u"")
+ adminrealm = sa.Column(sa.Unicode(256), default=u"")
+ adminuser = sa.Column(sa.Unicode(256), default=u"")
+ resolver = sa.Column(sa.Unicode(256), default=u"")
+ user = sa.Column(sa.Unicode(256), default=u"")
+ client = sa.Column(sa.Unicode(256), default=u"")
+ time = sa.Column(sa.Unicode(64), default=u"")
+ priority = sa.Column(sa.Integer, default=1, nullable=False)
def upgrade():
@@ -25,12 +48,11 @@
# Now that we added the column in the table, we can move the "user" from admin-policies to
# the "adminuser" column
-
+ bind = op.get_bind()
+ session = orm.Session(bind=bind)
+ pol_name = None
try:
- bind = op.get_bind()
- session = orm.Session(bind=bind)
- pol_name = None
- for policy in session.query(Policy).filter(Policy.user != "", Policy.scope == "admin"):
+ for policy in session.query(Policy).filter(Policy.user != u"", Policy.scope == u"admin"):
pol_name = policy.name
# move the "user" to the "adminuser"
policy.adminuser = policy.user
@@ -38,7 +60,8 @@
session.commit()
except Exception as exx:
session.rollback()
- print("Failed to migrate column adminuser in policies due to error in policy '{0!s}'.".format(pol_name))
+ print("Failed to migrate column adminuser in policies due to error in "
+ "policy '{0!s}'.".format(pol_name))
print(exx)
diff --git a/privacyidea/lib/auditmodules/containeraudit.py b/privacyidea/lib/auditmodules/containeraudit.py
--- a/privacyidea/lib/auditmodules/containeraudit.py
+++ b/privacyidea/lib/auditmodules/containeraudit.py
@@ -46,14 +46,15 @@
to a list of audit modules.
"""
- def __init__(self, config=None):
- super(Audit, self).__init__(config)
+ def __init__(self, config=None, startdate=None):
+ super(Audit, self).__init__(config, startdate)
self.name = "containeraudit"
write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')
read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')
# Initialize all modules
- self.write_modules = [get_module_class(audit_module, "Audit", "log")(config) for audit_module in write_conf]
- self.read_module = get_module_class(read_conf, "Audit", "log")(config)
+ self.write_modules = [get_module_class(audit_module, "Audit", "log")(config, startdate)
+ for audit_module in write_conf]
+ self.read_module = get_module_class(read_conf, "Audit", "log")(config, startdate)
if not self.read_module.is_readable:
log.warning(u"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.".format(self.read_module))
| {"golden_diff": "diff --git a/migrations/versions/a7e91b18a460_.py b/migrations/versions/a7e91b18a460_.py\n--- a/migrations/versions/a7e91b18a460_.py\n+++ b/migrations/versions/a7e91b18a460_.py\n@@ -12,8 +12,31 @@\n \n from alembic import op\n import sqlalchemy as sa\n-from privacyidea.models import Policy\n from sqlalchemy import orm\n+from sqlalchemy.ext.declarative import declarative_base\n+\n+Base = declarative_base()\n+\n+\n+# Use table definition from the point where the changes took place. If we use\n+# the table definition from models.py we might run into problems\n+class Policy(Base):\n+ __tablename__ = \"policy\"\n+ __table_args__ = {'mysql_row_format': 'DYNAMIC'}\n+ id = sa.Column(sa.Integer, sa.Sequence(\"policy_seq\"), primary_key=True)\n+ active = sa.Column(sa.Boolean, default=True)\n+ check_all_resolvers = sa.Column(sa.Boolean, default=False)\n+ name = sa.Column(sa.Unicode(64), unique=True, nullable=False)\n+ scope = sa.Column(sa.Unicode(32), nullable=False)\n+ action = sa.Column(sa.Unicode(2000), default=u\"\")\n+ realm = sa.Column(sa.Unicode(256), default=u\"\")\n+ adminrealm = sa.Column(sa.Unicode(256), default=u\"\")\n+ adminuser = sa.Column(sa.Unicode(256), default=u\"\")\n+ resolver = sa.Column(sa.Unicode(256), default=u\"\")\n+ user = sa.Column(sa.Unicode(256), default=u\"\")\n+ client = sa.Column(sa.Unicode(256), default=u\"\")\n+ time = sa.Column(sa.Unicode(64), default=u\"\")\n+ priority = sa.Column(sa.Integer, default=1, nullable=False)\n \n \n def upgrade():\n@@ -25,12 +48,11 @@\n \n # Now that we added the column in the table, we can move the \"user\" from admin-policies to\n # the \"adminuser\" column\n-\n+ bind = op.get_bind()\n+ session = orm.Session(bind=bind)\n+ pol_name = None\n try:\n- bind = op.get_bind()\n- session = orm.Session(bind=bind)\n- pol_name = None\n- for policy in session.query(Policy).filter(Policy.user != \"\", Policy.scope == \"admin\"):\n+ for policy in session.query(Policy).filter(Policy.user != u\"\", Policy.scope == u\"admin\"):\n pol_name = policy.name\n # move the \"user\" to the \"adminuser\"\n policy.adminuser = policy.user\n@@ -38,7 +60,8 @@\n session.commit()\n except Exception as exx:\n session.rollback()\n- print(\"Failed to migrate column adminuser in policies due to error in policy '{0!s}'.\".format(pol_name))\n+ print(\"Failed to migrate column adminuser in policies due to error in \"\n+ \"policy '{0!s}'.\".format(pol_name))\n print(exx)\n \n \ndiff --git a/privacyidea/lib/auditmodules/containeraudit.py b/privacyidea/lib/auditmodules/containeraudit.py\n--- a/privacyidea/lib/auditmodules/containeraudit.py\n+++ b/privacyidea/lib/auditmodules/containeraudit.py\n@@ -46,14 +46,15 @@\n to a list of audit modules.\n \"\"\"\n \n- def __init__(self, config=None):\n- super(Audit, self).__init__(config)\n+ def __init__(self, config=None, startdate=None):\n+ super(Audit, self).__init__(config, startdate)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n- self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config) for audit_module in write_conf]\n- self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config)\n+ self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config, startdate)\n+ for audit_module in write_conf]\n+ self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config, startdate)\n if not self.read_module.is_readable:\n 
log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n", "issue": "Migration fails when skipping the 3.3 version\nIn v3.3 the dedicated admin user was added to the policy table and the corresponding data was migrated (https://github.com/privacyidea/privacyidea/blob/master/migrations/versions/a7e91b18a460_.py).\r\nBut if the migration skips the 3.3 version, the data migration fails because the [`models.py`](https://github.com/privacyidea/privacyidea/blob/master/privacyidea/models.py) describes a different version of the database than the physical version.\r\nBy using a temporary policy table description in the migration script we can mitigate this failure.\r\nThis applies to privacyIDEA version 3.4 and up.\n", "before_files": [{"content": "\"\"\"add dedicated adminuser to policies\n\nRevision ID: a7e91b18a460\nRevises: 0c7123345224\nCreate Date: 2020-01-29 13:42:15.390923\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'a7e91b18a460'\ndown_revision = '0c7123345224'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom privacyidea.models import Policy\nfrom sqlalchemy import orm\n\n\ndef upgrade():\n try:\n op.add_column('policy', sa.Column('adminuser', sa.Unicode(length=256), nullable=True))\n except Exception as exx:\n print('Adding of column \"adminuser\" in table policy failed: {!r}'.format(exx))\n print('This is expected behavior if this column already exists.')\n\n # Now that we added the column in the table, we can move the \"user\" from admin-policies to\n # the \"adminuser\" column\n\n try:\n bind = op.get_bind()\n session = orm.Session(bind=bind)\n pol_name = None\n for policy in session.query(Policy).filter(Policy.user != \"\", Policy.scope == \"admin\"):\n pol_name = policy.name\n # move the \"user\" to the \"adminuser\"\n policy.adminuser = policy.user\n policy.user = u\"\"\n session.commit()\n except Exception as exx:\n session.rollback()\n print(\"Failed to migrate column adminuser in policies due to error in policy '{0!s}'.\".format(pol_name))\n print(exx)\n\n\ndef downgrade():\n op.drop_column('policy', 'adminuser')\n", "path": "migrations/versions/a7e91b18a460_.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# 2019-11-07 Cornelius K\u00f6lbel <[email protected]>\n# initial code for writing audit information to a file\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n#\n__doc__ = \"\"\"The Container Audit Module allows to write audit information to several different\naudit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the \nLogger Audit Module. This way audit information can be saved in the SQL database and at the same time\nbe passed to a file or external services via the Python logging facility. 
\n\nThe Container Audit Module is configured like this:\n\n PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit'\n PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit']\n PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit'\n\nYou also have to provide the configuration parameters for the referenced audit modules.\n\n\"\"\"\n\nimport logging\nfrom privacyidea.lib.auditmodules.base import (Audit as AuditBase)\nfrom privacyidea.lib.utils import get_module_class\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Audit(AuditBase):\n \"\"\"\n This is the ContainerAudit module, which writes the audit entries\n to a list of audit modules.\n \"\"\"\n\n def __init__(self, config=None):\n super(Audit, self).__init__(config)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config) for audit_module in write_conf]\n self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config)\n if not self.read_module.is_readable:\n log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n\n @property\n def has_data(self):\n return any([x.has_data for x in self.write_modules])\n\n def log(self, param):\n \"\"\"\n Call the log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.log(param)\n\n def add_to_log(self, param, add_with_comma=False):\n \"\"\"\n Call the add_to_log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_to_log(param, add_with_comma)\n\n def add_policy(self, policyname):\n \"\"\"\n Call the add_policy method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_policy(policyname)\n\n def search(self, search_dict, page_size=15, page=1, sortorder=\"asc\",\n timelimit=None):\n \"\"\"\n Call the search method for the one readable module\n \"\"\"\n return self.read_module.search(search_dict, page_size=page_size, page=page,\n sortorder=sortorder, timelimit=timelimit)\n\n def get_count(self, search_dict, timedelta=None, success=None):\n \"\"\"\n Call the count method for the one readable module\n \"\"\"\n return self.read_module.get_count(search_dict, timedelta=timedelta, success=success)\n\n def csv_generator(self, param=None, user=None, timelimit=None):\n \"\"\"\n Call the csv_generator method for the one readable module\n \"\"\"\n return self.read_module.csv_generator(param=param, user=user,\n timelimit=timelimit)\n\n def get_total(self, param, AND=True, display_error=True, timelimit=None):\n \"\"\"\n Call the total method for the one readable module\n \"\"\"\n return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit)\n\n def finalize_log(self):\n \"\"\"\n Call the finalize method of all writeable audit modules\n \"\"\"\n for module in self.write_modules:\n module.finalize_log()\n", "path": "privacyidea/lib/auditmodules/containeraudit.py"}], "after_files": [{"content": "\"\"\"add dedicated adminuser to policies\n\nRevision ID: a7e91b18a460\nRevises: 0c7123345224\nCreate Date: 2020-01-29 13:42:15.390923\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'a7e91b18a460'\ndown_revision = '0c7123345224'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy import orm\nfrom sqlalchemy.ext.declarative 
import declarative_base\n\nBase = declarative_base()\n\n\n# Use table definition from the point where the changes took place. If we use\n# the table definition from models.py we might run into problems\nclass Policy(Base):\n __tablename__ = \"policy\"\n __table_args__ = {'mysql_row_format': 'DYNAMIC'}\n id = sa.Column(sa.Integer, sa.Sequence(\"policy_seq\"), primary_key=True)\n active = sa.Column(sa.Boolean, default=True)\n check_all_resolvers = sa.Column(sa.Boolean, default=False)\n name = sa.Column(sa.Unicode(64), unique=True, nullable=False)\n scope = sa.Column(sa.Unicode(32), nullable=False)\n action = sa.Column(sa.Unicode(2000), default=u\"\")\n realm = sa.Column(sa.Unicode(256), default=u\"\")\n adminrealm = sa.Column(sa.Unicode(256), default=u\"\")\n adminuser = sa.Column(sa.Unicode(256), default=u\"\")\n resolver = sa.Column(sa.Unicode(256), default=u\"\")\n user = sa.Column(sa.Unicode(256), default=u\"\")\n client = sa.Column(sa.Unicode(256), default=u\"\")\n time = sa.Column(sa.Unicode(64), default=u\"\")\n priority = sa.Column(sa.Integer, default=1, nullable=False)\n\n\ndef upgrade():\n try:\n op.add_column('policy', sa.Column('adminuser', sa.Unicode(length=256), nullable=True))\n except Exception as exx:\n print('Adding of column \"adminuser\" in table policy failed: {!r}'.format(exx))\n print('This is expected behavior if this column already exists.')\n\n # Now that we added the column in the table, we can move the \"user\" from admin-policies to\n # the \"adminuser\" column\n bind = op.get_bind()\n session = orm.Session(bind=bind)\n pol_name = None\n try:\n for policy in session.query(Policy).filter(Policy.user != u\"\", Policy.scope == u\"admin\"):\n pol_name = policy.name\n # move the \"user\" to the \"adminuser\"\n policy.adminuser = policy.user\n policy.user = u\"\"\n session.commit()\n except Exception as exx:\n session.rollback()\n print(\"Failed to migrate column adminuser in policies due to error in \"\n \"policy '{0!s}'.\".format(pol_name))\n print(exx)\n\n\ndef downgrade():\n op.drop_column('policy', 'adminuser')\n", "path": "migrations/versions/a7e91b18a460_.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# 2019-11-07 Cornelius K\u00f6lbel <[email protected]>\n# initial code for writing audit information to a file\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n#\n__doc__ = \"\"\"The Container Audit Module allows to write audit information to several different\naudit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the \nLogger Audit Module. This way audit information can be saved in the SQL database and at the same time\nbe passed to a file or external services via the Python logging facility. 
\n\nThe Container Audit Module is configured like this:\n\n PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit'\n PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit']\n PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit'\n\nYou also have to provide the configuration parameters for the referenced audit modules.\n\n\"\"\"\n\nimport logging\nfrom privacyidea.lib.auditmodules.base import (Audit as AuditBase)\nfrom privacyidea.lib.utils import get_module_class\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Audit(AuditBase):\n \"\"\"\n This is the ContainerAudit module, which writes the audit entries\n to a list of audit modules.\n \"\"\"\n\n def __init__(self, config=None, startdate=None):\n super(Audit, self).__init__(config, startdate)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config, startdate)\n for audit_module in write_conf]\n self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config, startdate)\n if not self.read_module.is_readable:\n log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n\n @property\n def has_data(self):\n return any([x.has_data for x in self.write_modules])\n\n def log(self, param):\n \"\"\"\n Call the log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.log(param)\n\n def add_to_log(self, param, add_with_comma=False):\n \"\"\"\n Call the add_to_log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_to_log(param, add_with_comma)\n\n def add_policy(self, policyname):\n \"\"\"\n Call the add_policy method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_policy(policyname)\n\n def search(self, search_dict, page_size=15, page=1, sortorder=\"asc\",\n timelimit=None):\n \"\"\"\n Call the search method for the one readable module\n \"\"\"\n return self.read_module.search(search_dict, page_size=page_size, page=page,\n sortorder=sortorder, timelimit=timelimit)\n\n def get_count(self, search_dict, timedelta=None, success=None):\n \"\"\"\n Call the count method for the one readable module\n \"\"\"\n return self.read_module.get_count(search_dict, timedelta=timedelta, success=success)\n\n def csv_generator(self, param=None, user=None, timelimit=None):\n \"\"\"\n Call the csv_generator method for the one readable module\n \"\"\"\n return self.read_module.csv_generator(param=param, user=user,\n timelimit=timelimit)\n\n def get_total(self, param, AND=True, display_error=True, timelimit=None):\n \"\"\"\n Call the total method for the one readable module\n \"\"\"\n return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit)\n\n def finalize_log(self):\n \"\"\"\n Call the finalize method of all writeable audit modules\n \"\"\"\n for module in self.write_modules:\n module.finalize_log()\n", "path": "privacyidea/lib/auditmodules/containeraudit.py"}]} | 2,187 | 1,021 |
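The golden diff above applies a common Alembic convention: a data migration carries its own frozen snapshot of the table it touches instead of importing the live model, so later edits to models.py cannot break an old revision. A minimal sketch of that pattern — the trimmed column set and the `PolicySnapshot`/`migrate_admin_users` names are illustrative assumptions, not privacyIDEA's full schema:

```python
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class PolicySnapshot(Base):
    """Frozen copy of only the columns this migration needs."""
    __tablename__ = "policy"
    id = sa.Column(sa.Integer, primary_key=True)
    scope = sa.Column(sa.Unicode(32), nullable=False)
    user = sa.Column(sa.Unicode(256), default=u"")
    adminuser = sa.Column(sa.Unicode(256), default=u"")


def migrate_admin_users(bind):
    # In a real revision, bind would come from op.get_bind() inside upgrade().
    session = orm.Session(bind=bind)
    for policy in session.query(PolicySnapshot).filter(
            PolicySnapshot.user != u"", PolicySnapshot.scope == u"admin"):
        policy.adminuser = policy.user
        policy.user = u""
    session.commit()
```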
gh_patches_debug_11334 | rasdani/github-patches | git_diff | chainer__chainer-737 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use `numpy.uintp` for `size_t`
`numpy.intp` is `ssize_t`, and is not `size_t`. When value of `ptr` allocated by `malloc` method is larger than the maximum value of `ssize_t`, `numpy.intp(ptr)` causes an error, because it checks actual value of `ptr`.
We need to use `numpy.uintp` in this case.
Note that `numpy.uintp` is not documented in the reference manual http://docs.scipy.org/doc/numpy-1.10.1/user/basics.types.html
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/math/matmul.py`
Content:
```
1 import numpy
2 import six
3
4 from chainer import cuda
5 from chainer import function
6 from chainer.utils import type_check
7
8
9 def _mat_ptrs(a):
10 """Creates an array of pointers to matrices
11
12 Args:
13 a: A batch of matrices on GPU
14 Returns:
15 GPU array of pointers to matrices
16 """
17 if a.shape[0] == 1:
18 return cuda.cupy.full((1,), a[0].data.ptr, dtype=numpy.intp)
19 else:
20 stride = a[1].data.ptr - a[0].data.ptr
21 return cuda.cupy.arange(
22 a[0].data.ptr,
23 a[0].data.ptr + stride * a.shape[0],
24 stride,
25 dtype=numpy.intp)
26
27
28 def _as_mat(x):
29 return x.reshape((len(x), 1)) if len(x.shape) == 1 else x
30
31
32 def _as_batch_mat(x):
33 return x.reshape((x.shape[0], x.shape[1], 1)) if len(x.shape) == 2 else x
34
35
36 def _matmul(a, b, transa=False, transb=False, transout=False):
37 a = _as_mat(a)
38 b = _as_mat(b)
39 if transa:
40 a = a.T
41 if transb:
42 b = b.T
43 if transout:
44 # (A B)^T = B^T A^T
45 a, b = b.T, a.T
46 return a.dot(b)
47
48
49 def _get_ld(a):
50 shape = a.shape[-2:]
51 strides = a.strides[-2:]
52 trans = numpy.argmin(strides)
53 return trans, int(max(shape[trans], max(strides) // a.itemsize))
54
55
56 def _batch_matmul_gpu(a, b, out, transa=False, transb=False, transout=False):
57 a = _as_batch_mat(a)
58 b = _as_batch_mat(b)
59 trans_axis = (0, 2, 1)
60 if transout:
61 out = out.transpose(trans_axis)
62 needtrans, _ = _get_ld(out)
63 if needtrans == 1:
64 # (A B)^T = B^T A^T
65 a, b = b, a
66 transa, transb = not transb, not transa
67 out = out.transpose(trans_axis)
68 if transa:
69 a = a.transpose(trans_axis)
70 if transb:
71 b = b.transpose(trans_axis)
72
73 transa, lda = _get_ld(a)
74 transb, ldb = _get_ld(b)
75 transout, ldout = _get_ld(out)
76 la, n, ka = a.shape
77 lb, kb, m = b.shape
78
79 assert ka == kb
80 assert transout == 0 or ldout == 1
81 assert out.shape == (la, n, m)
82
83 ap = _mat_ptrs(a)
84 bp = _mat_ptrs(b)
85 outp = _mat_ptrs(out)
86 cuda.cublas.sgemmBatched(
87 cuda.Device().cublas_handle,
88 transa,
89 transb,
90 n, m, ka, 1.0,
91 ap.data.ptr, lda,
92 bp.data.ptr, ldb,
93 0.0, outp.data.ptr, ldout, la)
94
95
96 def _check_ndim(in_type, lower=1, upper=2):
97 type_check.expect(
98 in_type.ndim >= lower,
99 in_type.ndim <= upper
100 )
101
102
103 def _convert_type(in_type, vector_ndim=1):
104 if in_type.ndim.eval() == vector_ndim:
105 in_type = type_check.Variable(
106 type_check.TypeInfo(in_type.shape.eval() + (1,),
107 in_type.dtype),
108 '%s(1-D array)' % in_type.name)
109 else:
110 in_type.name = '%s(2-D array)' % in_type.name
111 return in_type
112
113
114 def _get_check_index(trans, right, row_idx=0, col_idx=1):
115 if trans ^ right:
116 return row_idx
117 else:
118 return col_idx
119
120
121 class MatMul(function.Function):
122 def __init__(self, transa=False, transb=False):
123 self.transa = transa
124 self.transb = transb
125
126 def check_type_forward(self, in_types):
127 type_check.expect(in_types.size() == 2)
128 a_type, b_type = in_types
129
130 type_check.expect(
131 a_type.dtype == numpy.float32,
132 b_type.dtype == numpy.float32
133 )
134
135 _check_ndim(a_type)
136 _check_ndim(b_type)
137
138 a_type = _convert_type(a_type)
139 b_type = _convert_type(b_type)
140 a_idx = _get_check_index(self.transa, False)
141 b_idx = _get_check_index(self.transb, True)
142 type_check.expect(
143 a_type.shape[a_idx] == b_type.shape[b_idx]
144 )
145
146 def forward(self, x):
147 return _matmul(x[0], x[1], transa=self.transa, transb=self.transb),
148
149 def backward(self, x, gy):
150 gx0 = _matmul(
151 gy[0], x[1], transb=not self.transb, transout=self.transa
152 ).reshape(x[0].shape)
153 gx1 = _matmul(
154 x[0], gy[0], transa=not self.transa, transout=self.transb
155 ).reshape(x[1].shape)
156 return gx0, gx1
157
158
159 def matmul(a, b, transa=False, transb=False):
160 """Computes the matrix multiplication of two arrays.
161
162 Args:
163 a (Variable): The left operand of the matrix multiplication.
164 A 1-D array of shape (N,) is considered as an Nx1 matrix.
165 A 2-D array of shape (M, N) is considered as an MxN matrix.
166 b (Variable): The right operand of the matrix multiplication.
167 Its array is treated as a matrix in the same way as ``a``'s array.
168 transa (bool): If true, transpose a.
169 transb (bool): If true, transpose b.
170
171 Returns:
172 ~chainer.Variable: The result of the matrix multiplication as a 2-D
173 array.
174 """
175 return MatMul(transa=transa, transb=transb)(a, b)
176
177
178 class BatchMatMul(function.Function):
179 def __init__(self, transa=False, transb=False):
180 self.transa = transa
181 self.transb = transb
182
183 def _output_shape(self, a, b):
184 batch_size = a.shape[0]
185 a_mat_shape = _as_mat(a[0]).shape
186 b_mat_shape = _as_mat(b[0]).shape
187 m = a_mat_shape[1 if self.transa else 0]
188 n = b_mat_shape[0 if self.transb else 1]
189 return (batch_size, m, n)
190
191 def check_type_forward(self, in_types):
192 type_check.expect(in_types.size() == 2)
193 a_type, b_type = in_types
194
195 type_check.expect(
196 a_type.dtype == numpy.float32,
197 b_type.dtype == numpy.float32
198 )
199
200 _check_ndim(a_type, lower=2, upper=3)
201 _check_ndim(b_type, lower=2, upper=3)
202
203 a_type = _convert_type(a_type, vector_ndim=2)
204 b_type = _convert_type(b_type, vector_ndim=2)
205 a_idx = _get_check_index(self.transa, False, row_idx=1, col_idx=2)
206 b_idx = _get_check_index(self.transb, True, row_idx=1, col_idx=2)
207 type_check.expect(
208 a_type.shape[a_idx] == b_type.shape[b_idx]
209 )
210
211 def forward_cpu(self, x):
212 a, b = x
213 batch_size = a.shape[0]
214 shape = self._output_shape(a, b)
215 ret_dtype = numpy.find_common_type([a.dtype, b.dtype], [])
216 ret = numpy.empty(shape, dtype=ret_dtype)
217 for i in six.moves.range(batch_size):
218 ret[i] = _matmul(
219 a[i], b[i], transa=self.transa, transb=self.transb)
220 return ret,
221
222 def backward_cpu(self, x, gy):
223 a, b = x
224 batch_size = a.shape[0]
225 ga = numpy.empty_like(a)
226 gb = numpy.empty_like(b)
227 for i in six.moves.range(batch_size):
228 ga[i] = _matmul(
229 gy[0][i], b[i], transb=not self.transb,
230 transout=self.transa).reshape(a[0].shape)
231 gb[i] = _matmul(
232 a[i], gy[0][i], transa=not self.transa,
233 transout=self.transb).reshape(b[0].shape)
234 return ga, gb
235
236 def forward_gpu(self, x):
237 a, b = x
238 shape = self._output_shape(a, b)
239 ret = cuda.zeros(shape)
240 _batch_matmul_gpu(
241 a, b, transa=self.transa, transb=self.transb, out=ret)
242 return ret,
243
244 def backward_gpu(self, x, gy):
245 a, b = x
246 batch_size = a.shape[0]
247 ga = cuda.empty((batch_size,) + _as_mat(a[0]).shape)
248 gb = cuda.empty((batch_size,) + _as_mat(b[0]).shape)
249 _batch_matmul_gpu(
250 gy[0], b, transb=not self.transb, transout=self.transa, out=ga)
251 _batch_matmul_gpu(
252 a, gy[0], transa=not self.transa, transout=self.transb, out=gb)
253 ga = ga.reshape(a.shape)
254 gb = gb.reshape(b.shape)
255 return ga, gb
256
257
258 def batch_matmul(a, b, transa=False, transb=False):
259 """Computes the batch matrix multiplications of two sets of arrays.
260
261 Args:
262 a (Variable): The left operand of the batch matrix multiplications.
263 A 2-D array of shape (B, N,) is considered as B Nx1 matrices.
264 A 3-D array of shape (B, M, N) is considered as B MxN matrices.
265 b (Variable): The right operand of the batch matrix multiplications.
266 Its array is treated as matrices in the same way as ``a``'s array.
267 transa (bool): If true, transpose each matrix in a.
268 transb (bool): If true, transpose each matrix in b.
269
270 Returns:
271 ~chainer.Variable: The result of the batch matrix multiplications as a
272 3-D array.
273 """
274 return BatchMatMul(transa=transa, transb=transb)(a, b)
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/math/matmul.py b/chainer/functions/math/matmul.py
--- a/chainer/functions/math/matmul.py
+++ b/chainer/functions/math/matmul.py
@@ -15,14 +15,14 @@
GPU array of pointers to matrices
"""
if a.shape[0] == 1:
- return cuda.cupy.full((1,), a[0].data.ptr, dtype=numpy.intp)
+ return cuda.cupy.full((1,), a[0].data.ptr, dtype=numpy.uintp)
else:
stride = a[1].data.ptr - a[0].data.ptr
return cuda.cupy.arange(
a[0].data.ptr,
a[0].data.ptr + stride * a.shape[0],
stride,
- dtype=numpy.intp)
+ dtype=numpy.uintp)
def _as_mat(x):
| {"golden_diff": "diff --git a/chainer/functions/math/matmul.py b/chainer/functions/math/matmul.py\n--- a/chainer/functions/math/matmul.py\n+++ b/chainer/functions/math/matmul.py\n@@ -15,14 +15,14 @@\n GPU array of pointers to matrices\n \"\"\"\n if a.shape[0] == 1:\n- return cuda.cupy.full((1,), a[0].data.ptr, dtype=numpy.intp)\n+ return cuda.cupy.full((1,), a[0].data.ptr, dtype=numpy.uintp)\n else:\n stride = a[1].data.ptr - a[0].data.ptr\n return cuda.cupy.arange(\n a[0].data.ptr,\n a[0].data.ptr + stride * a.shape[0],\n stride,\n- dtype=numpy.intp)\n+ dtype=numpy.uintp)\n \n \n def _as_mat(x):\n", "issue": "Use `numpy.uintp` for `size_t`\n`numpy.intp` is `ssize_t`, and is not `size_t`. When value of `ptr` allocated by `malloc` method is larger than the maximum value of `ssize_t`, `numpy.intp(ptr)` causes an error, because it checks actual value of `ptr`.\nWe need to use `numpy.uintp` in this case.\n\nNote that `numpy.uintp` is not documented in the reference manual http://docs.scipy.org/doc/numpy-1.10.1/user/basics.types.html\n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _mat_ptrs(a):\n \"\"\"Creates an array of pointers to matrices\n\n Args:\n a: A batch of matrices on GPU\n Returns:\n GPU array of pointers to matrices\n \"\"\"\n if a.shape[0] == 1:\n return cuda.cupy.full((1,), a[0].data.ptr, dtype=numpy.intp)\n else:\n stride = a[1].data.ptr - a[0].data.ptr\n return cuda.cupy.arange(\n a[0].data.ptr,\n a[0].data.ptr + stride * a.shape[0],\n stride,\n dtype=numpy.intp)\n\n\ndef _as_mat(x):\n return x.reshape((len(x), 1)) if len(x.shape) == 1 else x\n\n\ndef _as_batch_mat(x):\n return x.reshape((x.shape[0], x.shape[1], 1)) if len(x.shape) == 2 else x\n\n\ndef _matmul(a, b, transa=False, transb=False, transout=False):\n a = _as_mat(a)\n b = _as_mat(b)\n if transa:\n a = a.T\n if transb:\n b = b.T\n if transout:\n # (A B)^T = B^T A^T\n a, b = b.T, a.T\n return a.dot(b)\n\n\ndef _get_ld(a):\n shape = a.shape[-2:]\n strides = a.strides[-2:]\n trans = numpy.argmin(strides)\n return trans, int(max(shape[trans], max(strides) // a.itemsize))\n\n\ndef _batch_matmul_gpu(a, b, out, transa=False, transb=False, transout=False):\n a = _as_batch_mat(a)\n b = _as_batch_mat(b)\n trans_axis = (0, 2, 1)\n if transout:\n out = out.transpose(trans_axis)\n needtrans, _ = _get_ld(out)\n if needtrans == 1:\n # (A B)^T = B^T A^T\n a, b = b, a\n transa, transb = not transb, not transa\n out = out.transpose(trans_axis)\n if transa:\n a = a.transpose(trans_axis)\n if transb:\n b = b.transpose(trans_axis)\n\n transa, lda = _get_ld(a)\n transb, ldb = _get_ld(b)\n transout, ldout = _get_ld(out)\n la, n, ka = a.shape\n lb, kb, m = b.shape\n\n assert ka == kb\n assert transout == 0 or ldout == 1\n assert out.shape == (la, n, m)\n\n ap = _mat_ptrs(a)\n bp = _mat_ptrs(b)\n outp = _mat_ptrs(out)\n cuda.cublas.sgemmBatched(\n cuda.Device().cublas_handle,\n transa,\n transb,\n n, m, ka, 1.0,\n ap.data.ptr, lda,\n bp.data.ptr, ldb,\n 0.0, outp.data.ptr, ldout, la)\n\n\ndef _check_ndim(in_type, lower=1, upper=2):\n type_check.expect(\n in_type.ndim >= lower,\n in_type.ndim <= upper\n )\n\n\ndef _convert_type(in_type, vector_ndim=1):\n if in_type.ndim.eval() == vector_ndim:\n in_type = type_check.Variable(\n type_check.TypeInfo(in_type.shape.eval() + (1,),\n in_type.dtype),\n '%s(1-D array)' % in_type.name)\n else:\n in_type.name = '%s(2-D array)' % in_type.name\n return in_type\n\n\ndef 
_get_check_index(trans, right, row_idx=0, col_idx=1):\n if trans ^ right:\n return row_idx\n else:\n return col_idx\n\n\nclass MatMul(function.Function):\n def __init__(self, transa=False, transb=False):\n self.transa = transa\n self.transb = transb\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n a_type, b_type = in_types\n\n type_check.expect(\n a_type.dtype == numpy.float32,\n b_type.dtype == numpy.float32\n )\n\n _check_ndim(a_type)\n _check_ndim(b_type)\n\n a_type = _convert_type(a_type)\n b_type = _convert_type(b_type)\n a_idx = _get_check_index(self.transa, False)\n b_idx = _get_check_index(self.transb, True)\n type_check.expect(\n a_type.shape[a_idx] == b_type.shape[b_idx]\n )\n\n def forward(self, x):\n return _matmul(x[0], x[1], transa=self.transa, transb=self.transb),\n\n def backward(self, x, gy):\n gx0 = _matmul(\n gy[0], x[1], transb=not self.transb, transout=self.transa\n ).reshape(x[0].shape)\n gx1 = _matmul(\n x[0], gy[0], transa=not self.transa, transout=self.transb\n ).reshape(x[1].shape)\n return gx0, gx1\n\n\ndef matmul(a, b, transa=False, transb=False):\n \"\"\"Computes the matrix multiplication of two arrays.\n\n Args:\n a (Variable): The left operand of the matrix multiplication.\n A 1-D array of shape (N,) is considered as an Nx1 matrix.\n A 2-D array of shape (M, N) is considered as an MxN matrix.\n b (Variable): The right operand of the matrix multiplication.\n Its array is treated as a matrix in the same way as ``a``'s array.\n transa (bool): If true, transpose a.\n transb (bool): If true, transpose b.\n\n Returns:\n ~chainer.Variable: The result of the matrix multiplication as a 2-D\n array.\n \"\"\"\n return MatMul(transa=transa, transb=transb)(a, b)\n\n\nclass BatchMatMul(function.Function):\n def __init__(self, transa=False, transb=False):\n self.transa = transa\n self.transb = transb\n\n def _output_shape(self, a, b):\n batch_size = a.shape[0]\n a_mat_shape = _as_mat(a[0]).shape\n b_mat_shape = _as_mat(b[0]).shape\n m = a_mat_shape[1 if self.transa else 0]\n n = b_mat_shape[0 if self.transb else 1]\n return (batch_size, m, n)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n a_type, b_type = in_types\n\n type_check.expect(\n a_type.dtype == numpy.float32,\n b_type.dtype == numpy.float32\n )\n\n _check_ndim(a_type, lower=2, upper=3)\n _check_ndim(b_type, lower=2, upper=3)\n\n a_type = _convert_type(a_type, vector_ndim=2)\n b_type = _convert_type(b_type, vector_ndim=2)\n a_idx = _get_check_index(self.transa, False, row_idx=1, col_idx=2)\n b_idx = _get_check_index(self.transb, True, row_idx=1, col_idx=2)\n type_check.expect(\n a_type.shape[a_idx] == b_type.shape[b_idx]\n )\n\n def forward_cpu(self, x):\n a, b = x\n batch_size = a.shape[0]\n shape = self._output_shape(a, b)\n ret_dtype = numpy.find_common_type([a.dtype, b.dtype], [])\n ret = numpy.empty(shape, dtype=ret_dtype)\n for i in six.moves.range(batch_size):\n ret[i] = _matmul(\n a[i], b[i], transa=self.transa, transb=self.transb)\n return ret,\n\n def backward_cpu(self, x, gy):\n a, b = x\n batch_size = a.shape[0]\n ga = numpy.empty_like(a)\n gb = numpy.empty_like(b)\n for i in six.moves.range(batch_size):\n ga[i] = _matmul(\n gy[0][i], b[i], transb=not self.transb,\n transout=self.transa).reshape(a[0].shape)\n gb[i] = _matmul(\n a[i], gy[0][i], transa=not self.transa,\n transout=self.transb).reshape(b[0].shape)\n return ga, gb\n\n def forward_gpu(self, x):\n a, b = x\n shape = self._output_shape(a, b)\n ret = 
cuda.zeros(shape)\n _batch_matmul_gpu(\n a, b, transa=self.transa, transb=self.transb, out=ret)\n return ret,\n\n def backward_gpu(self, x, gy):\n a, b = x\n batch_size = a.shape[0]\n ga = cuda.empty((batch_size,) + _as_mat(a[0]).shape)\n gb = cuda.empty((batch_size,) + _as_mat(b[0]).shape)\n _batch_matmul_gpu(\n gy[0], b, transb=not self.transb, transout=self.transa, out=ga)\n _batch_matmul_gpu(\n a, gy[0], transa=not self.transa, transout=self.transb, out=gb)\n ga = ga.reshape(a.shape)\n gb = gb.reshape(b.shape)\n return ga, gb\n\n\ndef batch_matmul(a, b, transa=False, transb=False):\n \"\"\"Computes the batch matrix multiplications of two sets of arrays.\n\n Args:\n a (Variable): The left operand of the batch matrix multiplications.\n A 2-D array of shape (B, N,) is considered as B Nx1 matrices.\n A 3-D array of shape (B, M, N) is considered as B MxN matrices.\n b (Variable): The right operand of the batch matrix multiplications.\n Its array is treated as matrices in the same way as ``a``'s array.\n transa (bool): If true, transpose each matrix in a.\n transb (bool): If true, transpose each matrix in b.\n\n Returns:\n ~chainer.Variable: The result of the batch matrix multiplications as a\n 3-D array.\n \"\"\"\n return BatchMatMul(transa=transa, transb=transb)(a, b)\n", "path": "chainer/functions/math/matmul.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\n\n\ndef _mat_ptrs(a):\n \"\"\"Creates an array of pointers to matrices\n\n Args:\n a: A batch of matrices on GPU\n Returns:\n GPU array of pointers to matrices\n \"\"\"\n if a.shape[0] == 1:\n return cuda.cupy.full((1,), a[0].data.ptr, dtype=numpy.uintp)\n else:\n stride = a[1].data.ptr - a[0].data.ptr\n return cuda.cupy.arange(\n a[0].data.ptr,\n a[0].data.ptr + stride * a.shape[0],\n stride,\n dtype=numpy.uintp)\n\n\ndef _as_mat(x):\n return x.reshape((len(x), 1)) if len(x.shape) == 1 else x\n\n\ndef _as_batch_mat(x):\n return x.reshape((x.shape[0], x.shape[1], 1)) if len(x.shape) == 2 else x\n\n\ndef _matmul(a, b, transa=False, transb=False, transout=False):\n a = _as_mat(a)\n b = _as_mat(b)\n if transa:\n a = a.T\n if transb:\n b = b.T\n if transout:\n # (A B)^T = B^T A^T\n a, b = b.T, a.T\n return a.dot(b)\n\n\ndef _get_ld(a):\n shape = a.shape[-2:]\n strides = a.strides[-2:]\n trans = numpy.argmin(strides)\n return trans, int(max(shape[trans], max(strides) // a.itemsize))\n\n\ndef _batch_matmul_gpu(a, b, out, transa=False, transb=False, transout=False):\n a = _as_batch_mat(a)\n b = _as_batch_mat(b)\n trans_axis = (0, 2, 1)\n if transout:\n out = out.transpose(trans_axis)\n needtrans, _ = _get_ld(out)\n if needtrans == 1:\n # (A B)^T = B^T A^T\n a, b = b, a\n transa, transb = not transb, not transa\n out = out.transpose(trans_axis)\n if transa:\n a = a.transpose(trans_axis)\n if transb:\n b = b.transpose(trans_axis)\n\n transa, lda = _get_ld(a)\n transb, ldb = _get_ld(b)\n transout, ldout = _get_ld(out)\n la, n, ka = a.shape\n lb, kb, m = b.shape\n\n assert ka == kb\n assert transout == 0 or ldout == 1\n assert out.shape == (la, n, m)\n\n ap = _mat_ptrs(a)\n bp = _mat_ptrs(b)\n outp = _mat_ptrs(out)\n cuda.cublas.sgemmBatched(\n cuda.Device().cublas_handle,\n transa,\n transb,\n n, m, ka, 1.0,\n ap.data.ptr, lda,\n bp.data.ptr, ldb,\n 0.0, outp.data.ptr, ldout, la)\n\n\ndef _check_ndim(in_type, lower=1, upper=2):\n type_check.expect(\n in_type.ndim >= lower,\n in_type.ndim <= upper\n )\n\n\ndef _convert_type(in_type, 
vector_ndim=1):\n if in_type.ndim.eval() == vector_ndim:\n in_type = type_check.Variable(\n type_check.TypeInfo(in_type.shape.eval() + (1,),\n in_type.dtype),\n '%s(1-D array)' % in_type.name)\n else:\n in_type.name = '%s(2-D array)' % in_type.name\n return in_type\n\n\ndef _get_check_index(trans, right, row_idx=0, col_idx=1):\n if trans ^ right:\n return row_idx\n else:\n return col_idx\n\n\nclass MatMul(function.Function):\n def __init__(self, transa=False, transb=False):\n self.transa = transa\n self.transb = transb\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n a_type, b_type = in_types\n\n type_check.expect(\n a_type.dtype == numpy.float32,\n b_type.dtype == numpy.float32\n )\n\n _check_ndim(a_type)\n _check_ndim(b_type)\n\n a_type = _convert_type(a_type)\n b_type = _convert_type(b_type)\n a_idx = _get_check_index(self.transa, False)\n b_idx = _get_check_index(self.transb, True)\n type_check.expect(\n a_type.shape[a_idx] == b_type.shape[b_idx]\n )\n\n def forward(self, x):\n return _matmul(x[0], x[1], transa=self.transa, transb=self.transb),\n\n def backward(self, x, gy):\n gx0 = _matmul(\n gy[0], x[1], transb=not self.transb, transout=self.transa\n ).reshape(x[0].shape)\n gx1 = _matmul(\n x[0], gy[0], transa=not self.transa, transout=self.transb\n ).reshape(x[1].shape)\n return gx0, gx1\n\n\ndef matmul(a, b, transa=False, transb=False):\n \"\"\"Computes the matrix multiplication of two arrays.\n\n Args:\n a (Variable): The left operand of the matrix multiplication.\n A 1-D array of shape (N,) is considered as an Nx1 matrix.\n A 2-D array of shape (M, N) is considered as an MxN matrix.\n b (Variable): The right operand of the matrix multiplication.\n Its array is treated as a matrix in the same way as ``a``'s array.\n transa (bool): If true, transpose a.\n transb (bool): If true, transpose b.\n\n Returns:\n ~chainer.Variable: The result of the matrix multiplication as a 2-D\n array.\n \"\"\"\n return MatMul(transa=transa, transb=transb)(a, b)\n\n\nclass BatchMatMul(function.Function):\n def __init__(self, transa=False, transb=False):\n self.transa = transa\n self.transb = transb\n\n def _output_shape(self, a, b):\n batch_size = a.shape[0]\n a_mat_shape = _as_mat(a[0]).shape\n b_mat_shape = _as_mat(b[0]).shape\n m = a_mat_shape[1 if self.transa else 0]\n n = b_mat_shape[0 if self.transb else 1]\n return (batch_size, m, n)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n a_type, b_type = in_types\n\n type_check.expect(\n a_type.dtype == numpy.float32,\n b_type.dtype == numpy.float32\n )\n\n _check_ndim(a_type, lower=2, upper=3)\n _check_ndim(b_type, lower=2, upper=3)\n\n a_type = _convert_type(a_type, vector_ndim=2)\n b_type = _convert_type(b_type, vector_ndim=2)\n a_idx = _get_check_index(self.transa, False, row_idx=1, col_idx=2)\n b_idx = _get_check_index(self.transb, True, row_idx=1, col_idx=2)\n type_check.expect(\n a_type.shape[a_idx] == b_type.shape[b_idx]\n )\n\n def forward_cpu(self, x):\n a, b = x\n batch_size = a.shape[0]\n shape = self._output_shape(a, b)\n ret_dtype = numpy.find_common_type([a.dtype, b.dtype], [])\n ret = numpy.empty(shape, dtype=ret_dtype)\n for i in six.moves.range(batch_size):\n ret[i] = _matmul(\n a[i], b[i], transa=self.transa, transb=self.transb)\n return ret,\n\n def backward_cpu(self, x, gy):\n a, b = x\n batch_size = a.shape[0]\n ga = numpy.empty_like(a)\n gb = numpy.empty_like(b)\n for i in six.moves.range(batch_size):\n ga[i] = _matmul(\n gy[0][i], b[i], transb=not 
self.transb,\n transout=self.transa).reshape(a[0].shape)\n gb[i] = _matmul(\n a[i], gy[0][i], transa=not self.transa,\n transout=self.transb).reshape(b[0].shape)\n return ga, gb\n\n def forward_gpu(self, x):\n a, b = x\n shape = self._output_shape(a, b)\n ret = cuda.zeros(shape)\n _batch_matmul_gpu(\n a, b, transa=self.transa, transb=self.transb, out=ret)\n return ret,\n\n def backward_gpu(self, x, gy):\n a, b = x\n batch_size = a.shape[0]\n ga = cuda.empty((batch_size,) + _as_mat(a[0]).shape)\n gb = cuda.empty((batch_size,) + _as_mat(b[0]).shape)\n _batch_matmul_gpu(\n gy[0], b, transb=not self.transb, transout=self.transa, out=ga)\n _batch_matmul_gpu(\n a, gy[0], transa=not self.transa, transout=self.transb, out=gb)\n ga = ga.reshape(a.shape)\n gb = gb.reshape(b.shape)\n return ga, gb\n\n\ndef batch_matmul(a, b, transa=False, transb=False):\n \"\"\"Computes the batch matrix multiplications of two sets of arrays.\n\n Args:\n a (Variable): The left operand of the batch matrix multiplications.\n A 2-D array of shape (B, N,) is considered as B Nx1 matrices.\n A 3-D array of shape (B, M, N) is considered as B MxN matrices.\n b (Variable): The right operand of the batch matrix multiplications.\n Its array is treated as matrices in the same way as ``a``'s array.\n transa (bool): If true, transpose each matrix in a.\n transb (bool): If true, transpose each matrix in b.\n\n Returns:\n ~chainer.Variable: The result of the batch matrix multiplications as a\n 3-D array.\n \"\"\"\n return BatchMatMul(transa=transa, transb=transb)(a, b)\n", "path": "chainer/functions/math/matmul.py"}]} | 3,507 | 197 |
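The record above ends with the `batch_matmul` docstring, which treats a 2-D operand of shape (B, N) as B N×1 matrices and a 3-D operand of shape (B, M, N) as B M×N matrices. A short NumPy sketch makes that shape convention concrete; using `np.matmul` as a stand-in for Chainer's `BatchMatMul` is an assumption for illustration only, not the library's implementation.

```python
import numpy as np

# Shape convention from the batch_matmul docstring above:
#   a: (B, M, K), b: (B, K, N)  ->  out: (B, M, N)
B, M, K, N = 4, 3, 5, 2
a = np.random.rand(B, M, K).astype(np.float32)
b = np.random.rand(B, K, N).astype(np.float32)

# np.matmul broadcasts over the leading batch dimension, so it reproduces
# the per-matrix products that BatchMatMul computes for 3-D inputs.
out = np.matmul(a, b)
assert out.shape == (B, M, N)

# A 2-D operand of shape (B, K) is treated as B Kx1 matrices, matching the
# _as_batch_mat helper shown in the file above.
v = np.random.rand(B, K).astype(np.float32)
out_v = np.matmul(a, v[:, :, None])  # (B, M, 1)
assert out_v.shape == (B, M, 1)
```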
gh_patches_debug_3634 | rasdani/github-patches | git_diff | getsentry__sentry-53802 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Recently Active Members should be first on the dropdown
When selecting an action for an issue alert, Recently Active Members should be the first option and pre-selected.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/notifications/types.py`
Content:
```
1 from __future__ import annotations
2
3 from enum import Enum
4 from typing import Optional
5
6 from sentry.services.hybrid_cloud import ValueEqualityEnum
7
8 """
9 TODO(postgres): We've encoded these enums as integers to facilitate
10 communication with the DB. We'd prefer to encode them as strings to facilitate
11 communication with the API and plan to do so as soon as we use native enums in
12 Postgres. In the meantime each enum has an adjacent object that maps the
13 integers to their string values.
14 """
15
16
17 def get_notification_setting_type_name(value: int | NotificationSettingTypes) -> Optional[str]:
18 return NOTIFICATION_SETTING_TYPES.get(NotificationSettingTypes(value))
19
20
21 def get_notification_setting_value_name(value: int) -> Optional[str]:
22 return NOTIFICATION_SETTING_OPTION_VALUES.get(NotificationSettingOptionValues(value))
23
24
25 def get_notification_scope_name(value: int) -> Optional[str]:
26 return NOTIFICATION_SCOPE_TYPE.get(NotificationScopeType(value))
27
28
29 class NotificationSettingTypes(ValueEqualityEnum):
30 """
31 Each of these categories of Notification settings has at least an option for
32 "on" or "off". Workflow also includes SUBSCRIBE_ONLY and Deploy also
33 includes COMMITTED_ONLY and both of these values are described below.
34 """
35
36 # Control all notification types. Currently unused.
37 DEFAULT = 0
38
39 # When Sentry sees there is a new code deploy.
40 DEPLOY = 10
41
42 # When Sentry sees and issue that triggers an Alert Rule.
43 ISSUE_ALERTS = 20
44
45 # Notifications for changes in assignment, resolution, comments, etc.
46 WORKFLOW = 30
47
48 # Notification when an issue happens shortly after your release. This notification type is no longer supported.
49 ACTIVE_RELEASE = 31
50
51 # Notifications that require approval like a request to invite a member
52 APPROVAL = 40
53
54 # Notifications about quotas
55 QUOTA = 50
56
57 # Sub category of quotas for each event category
58 QUOTA_ERRORS = 51
59 QUOTA_TRANSACTIONS = 52
60 QUOTA_ATTACHMENTS = 53
61 QUOTA_REPLAYS = 56
62
63 # Sub category of quotas for warnings before hitting the actual limit
64 QUOTA_WARNINGS = 54
65
66 # Sub category of quotas for spend allocation notifications
67 QUOTA_SPEND_ALLOCATIONS = 55
68
69 # Notifications about spikes
70 SPIKE_PROTECTION = 60
71
72
73 NOTIFICATION_SETTING_TYPES = {
74 NotificationSettingTypes.DEFAULT: "default",
75 NotificationSettingTypes.DEPLOY: "deploy",
76 NotificationSettingTypes.ISSUE_ALERTS: "alerts",
77 NotificationSettingTypes.WORKFLOW: "workflow",
78 NotificationSettingTypes.ACTIVE_RELEASE: "activeRelease",
79 NotificationSettingTypes.APPROVAL: "approval",
80 NotificationSettingTypes.QUOTA: "quota",
81 NotificationSettingTypes.QUOTA_ERRORS: "quotaErrors",
82 NotificationSettingTypes.QUOTA_TRANSACTIONS: "quotaTransactions",
83 NotificationSettingTypes.QUOTA_ATTACHMENTS: "quotaAttachments",
84 NotificationSettingTypes.QUOTA_REPLAYS: "quotaReplays",
85 NotificationSettingTypes.QUOTA_WARNINGS: "quotaWarnings",
86 NotificationSettingTypes.QUOTA_SPEND_ALLOCATIONS: "quotaSpendAllocations",
87 NotificationSettingTypes.SPIKE_PROTECTION: "spikeProtection",
88 }
89
90
91 class NotificationSettingOptionValues(ValueEqualityEnum):
92 """
93 An empty row in the DB should be represented as
94 NotificationSettingOptionValues.DEFAULT.
95 """
96
97 # Defer to a setting one level up.
98 DEFAULT = 0
99
100 # Mute this kind of notification.
101 NEVER = 10
102
103 # Un-mute this kind of notification.
104 ALWAYS = 20
105
106 # Workflow only. Only send notifications about Issues that the target has
107 # explicitly or implicitly opted-into.
108 SUBSCRIBE_ONLY = 30
109
110 # Deploy only. Only send notifications when the set of changes in the deploy
111 # included a commit authored by the target.
112 COMMITTED_ONLY = 40
113
114
115 NOTIFICATION_SETTING_OPTION_VALUES = {
116 NotificationSettingOptionValues.DEFAULT: "default",
117 NotificationSettingOptionValues.NEVER: "never",
118 NotificationSettingOptionValues.ALWAYS: "always",
119 NotificationSettingOptionValues.SUBSCRIBE_ONLY: "subscribe_only",
120 NotificationSettingOptionValues.COMMITTED_ONLY: "committed_only",
121 }
122
123
124 class NotificationScopeType(ValueEqualityEnum):
125 USER = 0
126 ORGANIZATION = 10
127 PROJECT = 20
128 TEAM = 30
129
130
131 NOTIFICATION_SCOPE_TYPE = {
132 NotificationScopeType.USER: "user",
133 NotificationScopeType.ORGANIZATION: "organization",
134 NotificationScopeType.PROJECT: "project",
135 NotificationScopeType.TEAM: "team",
136 }
137
138
139 class FineTuningAPIKey(Enum):
140 ALERTS = "alerts"
141 APPROVAL = "approval"
142 DEPLOY = "deploy"
143 EMAIL = "email"
144 QUOTA = "quota"
145 REPORTS = "reports"
146 WORKFLOW = "workflow"
147 SPIKE_PROTECTION = "spikeProtection"
148
149
150 class UserOptionsSettingsKey(Enum):
151 DEPLOY = "deployNotifications"
152 SELF_ACTIVITY = "personalActivityNotifications"
153 SELF_ASSIGN = "selfAssignOnResolve"
154 SUBSCRIBE_BY_DEFAULT = "subscribeByDefault"
155 WORKFLOW = "workflowNotifications"
156 ACTIVE_RELEASE = "activeReleaseNotifications"
157 APPROVAL = "approvalNotifications"
158 QUOTA = "quotaNotifications"
159 SPIKE_PROTECTION = "spikeProtectionNotifications"
160
161
162 VALID_VALUES_FOR_KEY = {
163 NotificationSettingTypes.APPROVAL: {
164 NotificationSettingOptionValues.ALWAYS,
165 NotificationSettingOptionValues.NEVER,
166 },
167 NotificationSettingTypes.DEPLOY: {
168 NotificationSettingOptionValues.ALWAYS,
169 NotificationSettingOptionValues.COMMITTED_ONLY,
170 NotificationSettingOptionValues.NEVER,
171 },
172 NotificationSettingTypes.ISSUE_ALERTS: {
173 NotificationSettingOptionValues.ALWAYS,
174 NotificationSettingOptionValues.NEVER,
175 },
176 NotificationSettingTypes.QUOTA: {
177 NotificationSettingOptionValues.ALWAYS,
178 NotificationSettingOptionValues.NEVER,
179 },
180 NotificationSettingTypes.QUOTA_ERRORS: {
181 NotificationSettingOptionValues.ALWAYS,
182 NotificationSettingOptionValues.NEVER,
183 },
184 NotificationSettingTypes.QUOTA_TRANSACTIONS: {
185 NotificationSettingOptionValues.ALWAYS,
186 NotificationSettingOptionValues.NEVER,
187 },
188 NotificationSettingTypes.QUOTA_ATTACHMENTS: {
189 NotificationSettingOptionValues.ALWAYS,
190 NotificationSettingOptionValues.NEVER,
191 },
192 NotificationSettingTypes.QUOTA_REPLAYS: {
193 NotificationSettingOptionValues.ALWAYS,
194 NotificationSettingOptionValues.NEVER,
195 },
196 NotificationSettingTypes.QUOTA_WARNINGS: {
197 NotificationSettingOptionValues.ALWAYS,
198 NotificationSettingOptionValues.NEVER,
199 },
200 NotificationSettingTypes.QUOTA_SPEND_ALLOCATIONS: {
201 NotificationSettingOptionValues.ALWAYS,
202 NotificationSettingOptionValues.NEVER,
203 },
204 NotificationSettingTypes.WORKFLOW: {
205 NotificationSettingOptionValues.ALWAYS,
206 NotificationSettingOptionValues.SUBSCRIBE_ONLY,
207 NotificationSettingOptionValues.NEVER,
208 },
209 NotificationSettingTypes.SPIKE_PROTECTION: {
210 NotificationSettingOptionValues.ALWAYS,
211 NotificationSettingOptionValues.NEVER,
212 },
213 }
214
215
216 class GroupSubscriptionReason:
217 implicit = -1 # not for use as a persisted field value
218 committed = -2 # not for use as a persisted field value
219 processing_issue = -3 # not for use as a persisted field value
220
221 unknown = 0
222 comment = 1
223 assigned = 2
224 bookmark = 3
225 status_change = 4
226 deploy_setting = 5
227 mentioned = 6
228 team_mentioned = 7
229
230 descriptions = {
231 implicit: "have opted to receive updates for all issues within "
232 "projects that you are a member of",
233 committed: "were involved in a commit that is part of this release",
234 processing_issue: "are subscribed to alerts for this project",
235 comment: "have commented on this issue",
236 assigned: "have been assigned to this issue",
237 bookmark: "have bookmarked this issue",
238 status_change: "have changed the resolution status of this issue",
239 deploy_setting: "opted to receive all deploy notifications for this organization",
240 mentioned: "have been mentioned in this issue",
241 team_mentioned: "are a member of a team mentioned in this issue",
242 }
243
244
245 SUBSCRIPTION_REASON_MAP = {
246 GroupSubscriptionReason.comment: "commented",
247 GroupSubscriptionReason.assigned: "assigned",
248 GroupSubscriptionReason.bookmark: "bookmarked",
249 GroupSubscriptionReason.status_change: "changed_status",
250 GroupSubscriptionReason.mentioned: "mentioned",
251 }
252
253
254 class ActionTargetType(Enum):
255 ISSUE_OWNERS = "IssueOwners"
256 TEAM = "Team"
257 MEMBER = "Member"
258
259
260 ACTION_CHOICES = [
261 (ActionTargetType.ISSUE_OWNERS.value, "Issue Owners"),
262 (ActionTargetType.TEAM.value, "Team"),
263 (ActionTargetType.MEMBER.value, "Member"),
264 ]
265
266
267 class FallthroughChoiceType(Enum):
268 ALL_MEMBERS = "AllMembers"
269 ACTIVE_MEMBERS = "ActiveMembers"
270 NO_ONE = "NoOne"
271
272
273 FALLTHROUGH_CHOICES = [
274 (FallthroughChoiceType.ALL_MEMBERS.value, "All Project Members"),
275 (FallthroughChoiceType.ACTIVE_MEMBERS.value, "Recently Active Members"),
276 (FallthroughChoiceType.NO_ONE.value, "No One"),
277 ]
278
279
280 class AssigneeTargetType(Enum):
281 UNASSIGNED = "Unassigned"
282 TEAM = "Team"
283 MEMBER = "Member"
284
285
286 ASSIGNEE_CHOICES = [
287 (AssigneeTargetType.UNASSIGNED.value, "Unassigned"),
288 (AssigneeTargetType.TEAM.value, "Team"),
289 (AssigneeTargetType.MEMBER.value, "Member"),
290 ]
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/notifications/types.py b/src/sentry/notifications/types.py
--- a/src/sentry/notifications/types.py
+++ b/src/sentry/notifications/types.py
@@ -271,8 +271,8 @@
FALLTHROUGH_CHOICES = [
- (FallthroughChoiceType.ALL_MEMBERS.value, "All Project Members"),
(FallthroughChoiceType.ACTIVE_MEMBERS.value, "Recently Active Members"),
+ (FallthroughChoiceType.ALL_MEMBERS.value, "All Project Members"),
(FallthroughChoiceType.NO_ONE.value, "No One"),
]
| {"golden_diff": "diff --git a/src/sentry/notifications/types.py b/src/sentry/notifications/types.py\n--- a/src/sentry/notifications/types.py\n+++ b/src/sentry/notifications/types.py\n@@ -271,8 +271,8 @@\n \n \n FALLTHROUGH_CHOICES = [\n- (FallthroughChoiceType.ALL_MEMBERS.value, \"All Project Members\"),\n (FallthroughChoiceType.ACTIVE_MEMBERS.value, \"Recently Active Members\"),\n+ (FallthroughChoiceType.ALL_MEMBERS.value, \"All Project Members\"),\n (FallthroughChoiceType.NO_ONE.value, \"No One\"),\n ]\n", "issue": "Recently Active Members should be first on the dropdown\nWhen selecting an action for an issue alert, Recently Active Members should be the first option and pre-selected.\n\n\n\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom enum import Enum\nfrom typing import Optional\n\nfrom sentry.services.hybrid_cloud import ValueEqualityEnum\n\n\"\"\"\nTODO(postgres): We've encoded these enums as integers to facilitate\ncommunication with the DB. We'd prefer to encode them as strings to facilitate\ncommunication with the API and plan to do so as soon as we use native enums in\nPostgres. In the meantime each enum has an adjacent object that maps the\nintegers to their string values.\n\"\"\"\n\n\ndef get_notification_setting_type_name(value: int | NotificationSettingTypes) -> Optional[str]:\n return NOTIFICATION_SETTING_TYPES.get(NotificationSettingTypes(value))\n\n\ndef get_notification_setting_value_name(value: int) -> Optional[str]:\n return NOTIFICATION_SETTING_OPTION_VALUES.get(NotificationSettingOptionValues(value))\n\n\ndef get_notification_scope_name(value: int) -> Optional[str]:\n return NOTIFICATION_SCOPE_TYPE.get(NotificationScopeType(value))\n\n\nclass NotificationSettingTypes(ValueEqualityEnum):\n \"\"\"\n Each of these categories of Notification settings has at least an option for\n \"on\" or \"off\". Workflow also includes SUBSCRIBE_ONLY and Deploy also\n includes COMMITTED_ONLY and both of these values are described below.\n \"\"\"\n\n # Control all notification types. Currently unused.\n DEFAULT = 0\n\n # When Sentry sees there is a new code deploy.\n DEPLOY = 10\n\n # When Sentry sees and issue that triggers an Alert Rule.\n ISSUE_ALERTS = 20\n\n # Notifications for changes in assignment, resolution, comments, etc.\n WORKFLOW = 30\n\n # Notification when an issue happens shortly after your release. 
This notification type is no longer supported.\n ACTIVE_RELEASE = 31\n\n # Notifications that require approval like a request to invite a member\n APPROVAL = 40\n\n # Notifications about quotas\n QUOTA = 50\n\n # Sub category of quotas for each event category\n QUOTA_ERRORS = 51\n QUOTA_TRANSACTIONS = 52\n QUOTA_ATTACHMENTS = 53\n QUOTA_REPLAYS = 56\n\n # Sub category of quotas for warnings before hitting the actual limit\n QUOTA_WARNINGS = 54\n\n # Sub category of quotas for spend allocation notifications\n QUOTA_SPEND_ALLOCATIONS = 55\n\n # Notifications about spikes\n SPIKE_PROTECTION = 60\n\n\nNOTIFICATION_SETTING_TYPES = {\n NotificationSettingTypes.DEFAULT: \"default\",\n NotificationSettingTypes.DEPLOY: \"deploy\",\n NotificationSettingTypes.ISSUE_ALERTS: \"alerts\",\n NotificationSettingTypes.WORKFLOW: \"workflow\",\n NotificationSettingTypes.ACTIVE_RELEASE: \"activeRelease\",\n NotificationSettingTypes.APPROVAL: \"approval\",\n NotificationSettingTypes.QUOTA: \"quota\",\n NotificationSettingTypes.QUOTA_ERRORS: \"quotaErrors\",\n NotificationSettingTypes.QUOTA_TRANSACTIONS: \"quotaTransactions\",\n NotificationSettingTypes.QUOTA_ATTACHMENTS: \"quotaAttachments\",\n NotificationSettingTypes.QUOTA_REPLAYS: \"quotaReplays\",\n NotificationSettingTypes.QUOTA_WARNINGS: \"quotaWarnings\",\n NotificationSettingTypes.QUOTA_SPEND_ALLOCATIONS: \"quotaSpendAllocations\",\n NotificationSettingTypes.SPIKE_PROTECTION: \"spikeProtection\",\n}\n\n\nclass NotificationSettingOptionValues(ValueEqualityEnum):\n \"\"\"\n An empty row in the DB should be represented as\n NotificationSettingOptionValues.DEFAULT.\n \"\"\"\n\n # Defer to a setting one level up.\n DEFAULT = 0\n\n # Mute this kind of notification.\n NEVER = 10\n\n # Un-mute this kind of notification.\n ALWAYS = 20\n\n # Workflow only. Only send notifications about Issues that the target has\n # explicitly or implicitly opted-into.\n SUBSCRIBE_ONLY = 30\n\n # Deploy only. 
Only send notifications when the set of changes in the deploy\n # included a commit authored by the target.\n COMMITTED_ONLY = 40\n\n\nNOTIFICATION_SETTING_OPTION_VALUES = {\n NotificationSettingOptionValues.DEFAULT: \"default\",\n NotificationSettingOptionValues.NEVER: \"never\",\n NotificationSettingOptionValues.ALWAYS: \"always\",\n NotificationSettingOptionValues.SUBSCRIBE_ONLY: \"subscribe_only\",\n NotificationSettingOptionValues.COMMITTED_ONLY: \"committed_only\",\n}\n\n\nclass NotificationScopeType(ValueEqualityEnum):\n USER = 0\n ORGANIZATION = 10\n PROJECT = 20\n TEAM = 30\n\n\nNOTIFICATION_SCOPE_TYPE = {\n NotificationScopeType.USER: \"user\",\n NotificationScopeType.ORGANIZATION: \"organization\",\n NotificationScopeType.PROJECT: \"project\",\n NotificationScopeType.TEAM: \"team\",\n}\n\n\nclass FineTuningAPIKey(Enum):\n ALERTS = \"alerts\"\n APPROVAL = \"approval\"\n DEPLOY = \"deploy\"\n EMAIL = \"email\"\n QUOTA = \"quota\"\n REPORTS = \"reports\"\n WORKFLOW = \"workflow\"\n SPIKE_PROTECTION = \"spikeProtection\"\n\n\nclass UserOptionsSettingsKey(Enum):\n DEPLOY = \"deployNotifications\"\n SELF_ACTIVITY = \"personalActivityNotifications\"\n SELF_ASSIGN = \"selfAssignOnResolve\"\n SUBSCRIBE_BY_DEFAULT = \"subscribeByDefault\"\n WORKFLOW = \"workflowNotifications\"\n ACTIVE_RELEASE = \"activeReleaseNotifications\"\n APPROVAL = \"approvalNotifications\"\n QUOTA = \"quotaNotifications\"\n SPIKE_PROTECTION = \"spikeProtectionNotifications\"\n\n\nVALID_VALUES_FOR_KEY = {\n NotificationSettingTypes.APPROVAL: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.DEPLOY: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.COMMITTED_ONLY,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.ISSUE_ALERTS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_ERRORS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_TRANSACTIONS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_ATTACHMENTS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_REPLAYS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_WARNINGS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_SPEND_ALLOCATIONS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.WORKFLOW: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.SUBSCRIBE_ONLY,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.SPIKE_PROTECTION: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n}\n\n\nclass GroupSubscriptionReason:\n implicit = -1 # not for use as a persisted field value\n committed = -2 # not for use as a persisted field value\n processing_issue = -3 # not for use as a persisted field value\n\n unknown = 0\n comment = 1\n assigned = 2\n bookmark = 3\n status_change = 4\n deploy_setting = 5\n mentioned = 6\n team_mentioned = 7\n\n 
descriptions = {\n implicit: \"have opted to receive updates for all issues within \"\n \"projects that you are a member of\",\n committed: \"were involved in a commit that is part of this release\",\n processing_issue: \"are subscribed to alerts for this project\",\n comment: \"have commented on this issue\",\n assigned: \"have been assigned to this issue\",\n bookmark: \"have bookmarked this issue\",\n status_change: \"have changed the resolution status of this issue\",\n deploy_setting: \"opted to receive all deploy notifications for this organization\",\n mentioned: \"have been mentioned in this issue\",\n team_mentioned: \"are a member of a team mentioned in this issue\",\n }\n\n\nSUBSCRIPTION_REASON_MAP = {\n GroupSubscriptionReason.comment: \"commented\",\n GroupSubscriptionReason.assigned: \"assigned\",\n GroupSubscriptionReason.bookmark: \"bookmarked\",\n GroupSubscriptionReason.status_change: \"changed_status\",\n GroupSubscriptionReason.mentioned: \"mentioned\",\n}\n\n\nclass ActionTargetType(Enum):\n ISSUE_OWNERS = \"IssueOwners\"\n TEAM = \"Team\"\n MEMBER = \"Member\"\n\n\nACTION_CHOICES = [\n (ActionTargetType.ISSUE_OWNERS.value, \"Issue Owners\"),\n (ActionTargetType.TEAM.value, \"Team\"),\n (ActionTargetType.MEMBER.value, \"Member\"),\n]\n\n\nclass FallthroughChoiceType(Enum):\n ALL_MEMBERS = \"AllMembers\"\n ACTIVE_MEMBERS = \"ActiveMembers\"\n NO_ONE = \"NoOne\"\n\n\nFALLTHROUGH_CHOICES = [\n (FallthroughChoiceType.ALL_MEMBERS.value, \"All Project Members\"),\n (FallthroughChoiceType.ACTIVE_MEMBERS.value, \"Recently Active Members\"),\n (FallthroughChoiceType.NO_ONE.value, \"No One\"),\n]\n\n\nclass AssigneeTargetType(Enum):\n UNASSIGNED = \"Unassigned\"\n TEAM = \"Team\"\n MEMBER = \"Member\"\n\n\nASSIGNEE_CHOICES = [\n (AssigneeTargetType.UNASSIGNED.value, \"Unassigned\"),\n (AssigneeTargetType.TEAM.value, \"Team\"),\n (AssigneeTargetType.MEMBER.value, \"Member\"),\n]\n", "path": "src/sentry/notifications/types.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom enum import Enum\nfrom typing import Optional\n\nfrom sentry.services.hybrid_cloud import ValueEqualityEnum\n\n\"\"\"\nTODO(postgres): We've encoded these enums as integers to facilitate\ncommunication with the DB. We'd prefer to encode them as strings to facilitate\ncommunication with the API and plan to do so as soon as we use native enums in\nPostgres. In the meantime each enum has an adjacent object that maps the\nintegers to their string values.\n\"\"\"\n\n\ndef get_notification_setting_type_name(value: int | NotificationSettingTypes) -> Optional[str]:\n return NOTIFICATION_SETTING_TYPES.get(NotificationSettingTypes(value))\n\n\ndef get_notification_setting_value_name(value: int) -> Optional[str]:\n return NOTIFICATION_SETTING_OPTION_VALUES.get(NotificationSettingOptionValues(value))\n\n\ndef get_notification_scope_name(value: int) -> Optional[str]:\n return NOTIFICATION_SCOPE_TYPE.get(NotificationScopeType(value))\n\n\nclass NotificationSettingTypes(ValueEqualityEnum):\n \"\"\"\n Each of these categories of Notification settings has at least an option for\n \"on\" or \"off\". Workflow also includes SUBSCRIBE_ONLY and Deploy also\n includes COMMITTED_ONLY and both of these values are described below.\n \"\"\"\n\n # Control all notification types. 
Currently unused.\n DEFAULT = 0\n\n # When Sentry sees there is a new code deploy.\n DEPLOY = 10\n\n # When Sentry sees and issue that triggers an Alert Rule.\n ISSUE_ALERTS = 20\n\n # Notifications for changes in assignment, resolution, comments, etc.\n WORKFLOW = 30\n\n # Notification when an issue happens shortly after your release. This notification type is no longer supported.\n ACTIVE_RELEASE = 31\n\n # Notifications that require approval like a request to invite a member\n APPROVAL = 40\n\n # Notifications about quotas\n QUOTA = 50\n\n # Sub category of quotas for each event category\n QUOTA_ERRORS = 51\n QUOTA_TRANSACTIONS = 52\n QUOTA_ATTACHMENTS = 53\n QUOTA_REPLAYS = 56\n\n # Sub category of quotas for warnings before hitting the actual limit\n QUOTA_WARNINGS = 54\n\n # Sub category of quotas for spend allocation notifications\n QUOTA_SPEND_ALLOCATIONS = 55\n\n # Notifications about spikes\n SPIKE_PROTECTION = 60\n\n\nNOTIFICATION_SETTING_TYPES = {\n NotificationSettingTypes.DEFAULT: \"default\",\n NotificationSettingTypes.DEPLOY: \"deploy\",\n NotificationSettingTypes.ISSUE_ALERTS: \"alerts\",\n NotificationSettingTypes.WORKFLOW: \"workflow\",\n NotificationSettingTypes.ACTIVE_RELEASE: \"activeRelease\",\n NotificationSettingTypes.APPROVAL: \"approval\",\n NotificationSettingTypes.QUOTA: \"quota\",\n NotificationSettingTypes.QUOTA_ERRORS: \"quotaErrors\",\n NotificationSettingTypes.QUOTA_TRANSACTIONS: \"quotaTransactions\",\n NotificationSettingTypes.QUOTA_ATTACHMENTS: \"quotaAttachments\",\n NotificationSettingTypes.QUOTA_REPLAYS: \"quotaReplays\",\n NotificationSettingTypes.QUOTA_WARNINGS: \"quotaWarnings\",\n NotificationSettingTypes.QUOTA_SPEND_ALLOCATIONS: \"quotaSpendAllocations\",\n NotificationSettingTypes.SPIKE_PROTECTION: \"spikeProtection\",\n}\n\n\nclass NotificationSettingOptionValues(ValueEqualityEnum):\n \"\"\"\n An empty row in the DB should be represented as\n NotificationSettingOptionValues.DEFAULT.\n \"\"\"\n\n # Defer to a setting one level up.\n DEFAULT = 0\n\n # Mute this kind of notification.\n NEVER = 10\n\n # Un-mute this kind of notification.\n ALWAYS = 20\n\n # Workflow only. Only send notifications about Issues that the target has\n # explicitly or implicitly opted-into.\n SUBSCRIBE_ONLY = 30\n\n # Deploy only. 
Only send notifications when the set of changes in the deploy\n # included a commit authored by the target.\n COMMITTED_ONLY = 40\n\n\nNOTIFICATION_SETTING_OPTION_VALUES = {\n NotificationSettingOptionValues.DEFAULT: \"default\",\n NotificationSettingOptionValues.NEVER: \"never\",\n NotificationSettingOptionValues.ALWAYS: \"always\",\n NotificationSettingOptionValues.SUBSCRIBE_ONLY: \"subscribe_only\",\n NotificationSettingOptionValues.COMMITTED_ONLY: \"committed_only\",\n}\n\n\nclass NotificationScopeType(ValueEqualityEnum):\n USER = 0\n ORGANIZATION = 10\n PROJECT = 20\n TEAM = 30\n\n\nNOTIFICATION_SCOPE_TYPE = {\n NotificationScopeType.USER: \"user\",\n NotificationScopeType.ORGANIZATION: \"organization\",\n NotificationScopeType.PROJECT: \"project\",\n NotificationScopeType.TEAM: \"team\",\n}\n\n\nclass FineTuningAPIKey(Enum):\n ALERTS = \"alerts\"\n APPROVAL = \"approval\"\n DEPLOY = \"deploy\"\n EMAIL = \"email\"\n QUOTA = \"quota\"\n REPORTS = \"reports\"\n WORKFLOW = \"workflow\"\n SPIKE_PROTECTION = \"spikeProtection\"\n\n\nclass UserOptionsSettingsKey(Enum):\n DEPLOY = \"deployNotifications\"\n SELF_ACTIVITY = \"personalActivityNotifications\"\n SELF_ASSIGN = \"selfAssignOnResolve\"\n SUBSCRIBE_BY_DEFAULT = \"subscribeByDefault\"\n WORKFLOW = \"workflowNotifications\"\n ACTIVE_RELEASE = \"activeReleaseNotifications\"\n APPROVAL = \"approvalNotifications\"\n QUOTA = \"quotaNotifications\"\n SPIKE_PROTECTION = \"spikeProtectionNotifications\"\n\n\nVALID_VALUES_FOR_KEY = {\n NotificationSettingTypes.APPROVAL: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.DEPLOY: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.COMMITTED_ONLY,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.ISSUE_ALERTS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_ERRORS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_TRANSACTIONS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_ATTACHMENTS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_REPLAYS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_WARNINGS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.QUOTA_SPEND_ALLOCATIONS: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.WORKFLOW: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.SUBSCRIBE_ONLY,\n NotificationSettingOptionValues.NEVER,\n },\n NotificationSettingTypes.SPIKE_PROTECTION: {\n NotificationSettingOptionValues.ALWAYS,\n NotificationSettingOptionValues.NEVER,\n },\n}\n\n\nclass GroupSubscriptionReason:\n implicit = -1 # not for use as a persisted field value\n committed = -2 # not for use as a persisted field value\n processing_issue = -3 # not for use as a persisted field value\n\n unknown = 0\n comment = 1\n assigned = 2\n bookmark = 3\n status_change = 4\n deploy_setting = 5\n mentioned = 6\n team_mentioned = 7\n\n 
descriptions = {\n implicit: \"have opted to receive updates for all issues within \"\n \"projects that you are a member of\",\n committed: \"were involved in a commit that is part of this release\",\n processing_issue: \"are subscribed to alerts for this project\",\n comment: \"have commented on this issue\",\n assigned: \"have been assigned to this issue\",\n bookmark: \"have bookmarked this issue\",\n status_change: \"have changed the resolution status of this issue\",\n deploy_setting: \"opted to receive all deploy notifications for this organization\",\n mentioned: \"have been mentioned in this issue\",\n team_mentioned: \"are a member of a team mentioned in this issue\",\n }\n\n\nSUBSCRIPTION_REASON_MAP = {\n GroupSubscriptionReason.comment: \"commented\",\n GroupSubscriptionReason.assigned: \"assigned\",\n GroupSubscriptionReason.bookmark: \"bookmarked\",\n GroupSubscriptionReason.status_change: \"changed_status\",\n GroupSubscriptionReason.mentioned: \"mentioned\",\n}\n\n\nclass ActionTargetType(Enum):\n ISSUE_OWNERS = \"IssueOwners\"\n TEAM = \"Team\"\n MEMBER = \"Member\"\n\n\nACTION_CHOICES = [\n (ActionTargetType.ISSUE_OWNERS.value, \"Issue Owners\"),\n (ActionTargetType.TEAM.value, \"Team\"),\n (ActionTargetType.MEMBER.value, \"Member\"),\n]\n\n\nclass FallthroughChoiceType(Enum):\n ALL_MEMBERS = \"AllMembers\"\n ACTIVE_MEMBERS = \"ActiveMembers\"\n NO_ONE = \"NoOne\"\n\n\nFALLTHROUGH_CHOICES = [\n (FallthroughChoiceType.ACTIVE_MEMBERS.value, \"Recently Active Members\"),\n (FallthroughChoiceType.ALL_MEMBERS.value, \"All Project Members\"),\n (FallthroughChoiceType.NO_ONE.value, \"No One\"),\n]\n\n\nclass AssigneeTargetType(Enum):\n UNASSIGNED = \"Unassigned\"\n TEAM = \"Team\"\n MEMBER = \"Member\"\n\n\nASSIGNEE_CHOICES = [\n (AssigneeTargetType.UNASSIGNED.value, \"Unassigned\"),\n (AssigneeTargetType.TEAM.value, \"Team\"),\n (AssigneeTargetType.MEMBER.value, \"Member\"),\n]\n", "path": "src/sentry/notifications/types.py"}]} | 3,246 | 124 |
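The golden diff above only swaps the first two entries of `FALLTHROUGH_CHOICES`; presumably the issue-alert form pre-selects whichever choice is listed first, so moving "Recently Active Members" to the front is the whole fix. A standalone sketch of the reordered constant, with plain string values in place of the `FallthroughChoiceType` enum members so it runs on its own:

```python
# Standalone copy of the reordered constant from the diff above; the real
# module builds the first element of each tuple from FallthroughChoiceType.
FALLTHROUGH_CHOICES = [
    ("ActiveMembers", "Recently Active Members"),
    ("AllMembers", "All Project Members"),
    ("NoOne", "No One"),
]

# If the action dropdown simply defaults to the first choice, this ordering
# is what makes "Recently Active Members" the pre-selected option.
default_value, default_label = FALLTHROUGH_CHOICES[0]
assert default_value == "ActiveMembers"
assert default_label == "Recently Active Members"
```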
gh_patches_debug_28749 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-4040 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cloudtrail filter `is-shadow` always matches all resources
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/cloudtrail.py`
Content:
```
1 # Copyright 2017-2019 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from __future__ import absolute_import, division, print_function, unicode_literals
15
16 import logging
17 import operator
18
19 from c7n.actions import Action
20 from c7n.exceptions import PolicyValidationError
21 from c7n.filters import ValueFilter, Filter
22 from c7n.manager import resources
23 from c7n.query import QueryResourceManager
24 from c7n.utils import local_session, type_schema
25
26 from .aws import shape_validate, Arn
27
28 log = logging.getLogger('c7n.resources.cloudtrail')
29
30
31 @resources.register('cloudtrail')
32 class CloudTrail(QueryResourceManager):
33
34 class resource_type(object):
35 service = 'cloudtrail'
36 enum_spec = ('describe_trails', 'trailList', None)
37 filter_name = 'trailNameList'
38 filter_type = 'list'
39 arn = id = 'TrailARN'
40 name = 'Name'
41 dimension = None
42 config_type = "AWS::CloudTrail::Trail"
43
44
45 @CloudTrail.filter_registry.register('is-shadow')
46 class IsShadow(Filter):
47 """Identify shadow trails (secondary copies), shadow trails
48 can't be modified directly, the origin trail needs to be modified.
49
50 Shadow trails are created for multi-region trails as well for
51 organizational trails.
52 """
53 schema = type_schema('is-shadow', state={'type': 'boolean'})
54 permissions = ('cloudtrail:DescribeTrails',)
55 embedded = False
56
57 def process(self, resources, event=None):
58 anded = lambda x: True # NOQA
59 op = self.data.get('state', True) and anded or operator.__not__
60 rcount = len(resources)
61 trails = [t for t in resources if op(self.is_shadow(t))]
62 if len(trails) != rcount and self.embedded:
63 self.log.info("implicitly filtering shadow trails %d -> %d",
64 rcount, len(trails))
65 return trails
66
67 def is_shadow(self, t):
68 if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:
69 return True
70 if t.get('IsMultiRegionTrail') and t['HomeRegion'] not in t['TrailARN']:
71 return True
72
73
74 @CloudTrail.filter_registry.register('status')
75 class Status(ValueFilter):
76 """Filter a cloudtrail by its status.
77
78 :Example:
79
80 .. code-block:: yaml
81
82 policies:
83 - name: cloudtrail-not-active
84 resource: aws.cloudtrail
85 filters:
86 - type: status
87 key: IsLogging
88 value: False
89 """
90
91 schema = type_schema('status', rinherit=ValueFilter.schema)
92 permissions = ('cloudtrail:GetTrailStatus',)
93 annotation_key = 'c7n:TrailStatus'
94
95 def process(self, resources, event=None):
96 for r in resources:
97 region = self.manager.config.region
98 trail_arn = Arn.parse(r['TrailARN'])
99
100 if (r.get('IsOrganizationTrail') and
101 self.manager.config.account_id != trail_arn.account_id):
102 continue
103 if r.get('HomeRegion') and r['HomeRegion'] != region:
104 region = trail_arn.region
105 if self.annotation_key in r:
106 continue
107 client = local_session(self.manager.session_factory).client(
108 'cloudtrail', region_name=region)
109 status = client.get_trail_status(Name=r['Name'])
110 status.pop('ResponseMetadata')
111 r[self.annotation_key] = status
112
113 return super(Status, self).process(resources)
114
115 def __call__(self, r):
116 return self.match(r['c7n:TrailStatus'])
117
118
119 @CloudTrail.action_registry.register('update-trail')
120 class UpdateTrail(Action):
121 """Update trail attributes.
122
123 :Example:
124
125 .. code-block:: yaml
126
127 policies:
128 - name: cloudtrail-set-log
129 resource: aws.cloudtrail
130 filters:
131 - or:
132 - KmsKeyId: empty
133 - LogFileValidationEnabled: false
134 actions:
135 - type: update-trail
136 attributes:
137 KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef
138 EnableLogFileValidation: true
139 """
140 schema = type_schema(
141 'update-trail',
142 attributes={'type': 'object'},
143 required=('attributes',))
144 shape = 'UpdateTrailRequest'
145 permissions = ('cloudtrail:UpdateTrail',)
146
147 def validate(self):
148 attrs = dict(self.data['attributes'])
149 if 'Name' in attrs:
150 raise PolicyValidationError(
151 "Can't include Name in update-trail action")
152 attrs['Name'] = 'PolicyValidation'
153 return shape_validate(
154 attrs,
155 self.shape,
156 self.manager.resource_type.service)
157
158 def process(self, resources):
159 client = local_session(self.manager.session_factory).client('cloudtrail')
160 shadow_check = IsShadow({'state': False}, self.manager)
161 shadow_check.embedded = True
162 resources = shadow_check.process(resources)
163
164 for r in resources:
165 client.update_trail(
166 Name=r['Name'],
167 **self.data['attributes'])
168
169
170 @CloudTrail.action_registry.register('set-logging')
171 class SetLogging(Action):
172 """Set the logging state of a trail
173
174 :Example:
175
176 .. code-block:: yaml
177
178 policies:
179 - name: cloudtrail-not-active
180 resource: aws.cloudtrail
181 filters:
182 - type: status
183 key: IsLogging
184 value: False
185 actions:
186 - type: set-logging
187 enabled: True
188 """
189 schema = type_schema(
190 'set-logging', enabled={'type': 'boolean'})
191
192 def get_permissions(self):
193 enable = self.data.get('enabled', True)
194 if enable is True:
195 return ('cloudtrail:StartLogging',)
196 else:
197 return ('cloudtrail:StopLogging',)
198
199 def process(self, resources):
200 client = local_session(self.manager.session_factory).client('cloudtrail')
201 shadow_check = IsShadow({'state': False}, self.manager)
202 shadow_check.embedded = True
203 resources = shadow_check.process(resources)
204 enable = self.data.get('enabled', True)
205
206 for r in resources:
207 if enable:
208 client.start_logging(Name=r['Name'])
209 else:
210 client.stop_logging(Name=r['Name'])
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py
--- a/c7n/resources/cloudtrail.py
+++ b/c7n/resources/cloudtrail.py
@@ -14,7 +14,6 @@
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
-import operator
from c7n.actions import Action
from c7n.exceptions import PolicyValidationError
@@ -55,10 +54,8 @@
embedded = False
def process(self, resources, event=None):
- anded = lambda x: True # NOQA
- op = self.data.get('state', True) and anded or operator.__not__
rcount = len(resources)
- trails = [t for t in resources if op(self.is_shadow(t))]
+ trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]
if len(trails) != rcount and self.embedded:
self.log.info("implicitly filtering shadow trails %d -> %d",
rcount, len(trails))
@@ -67,8 +64,9 @@
def is_shadow(self, t):
if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:
return True
- if t.get('IsMultiRegionTrail') and t['HomeRegion'] not in t['TrailARN']:
+ if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:
return True
+ return False
@CloudTrail.filter_registry.register('status')
| {"golden_diff": "diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py\n--- a/c7n/resources/cloudtrail.py\n+++ b/c7n/resources/cloudtrail.py\n@@ -14,7 +14,6 @@\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n import logging\n-import operator\n \n from c7n.actions import Action\n from c7n.exceptions import PolicyValidationError\n@@ -55,10 +54,8 @@\n embedded = False\n \n def process(self, resources, event=None):\n- anded = lambda x: True # NOQA\n- op = self.data.get('state', True) and anded or operator.__not__\n rcount = len(resources)\n- trails = [t for t in resources if op(self.is_shadow(t))]\n+ trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]\n if len(trails) != rcount and self.embedded:\n self.log.info(\"implicitly filtering shadow trails %d -> %d\",\n rcount, len(trails))\n@@ -67,8 +64,9 @@\n def is_shadow(self, t):\n if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:\n return True\n- if t.get('IsMultiRegionTrail') and t['HomeRegion'] not in t['TrailARN']:\n+ if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:\n return True\n+ return False\n \n \n @CloudTrail.filter_registry.register('status')\n", "issue": "cloudtrail filter `is-shadow` always matches all resources\n\n", "before_files": [{"content": "# Copyright 2017-2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport operator\n\nfrom c7n.actions import Action\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import ValueFilter, Filter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema\n\nfrom .aws import shape_validate, Arn\n\nlog = logging.getLogger('c7n.resources.cloudtrail')\n\n\[email protected]('cloudtrail')\nclass CloudTrail(QueryResourceManager):\n\n class resource_type(object):\n service = 'cloudtrail'\n enum_spec = ('describe_trails', 'trailList', None)\n filter_name = 'trailNameList'\n filter_type = 'list'\n arn = id = 'TrailARN'\n name = 'Name'\n dimension = None\n config_type = \"AWS::CloudTrail::Trail\"\n\n\[email protected]_registry.register('is-shadow')\nclass IsShadow(Filter):\n \"\"\"Identify shadow trails (secondary copies), shadow trails\n can't be modified directly, the origin trail needs to be modified.\n\n Shadow trails are created for multi-region trails as well for\n organizational trails.\n \"\"\"\n schema = type_schema('is-shadow', state={'type': 'boolean'})\n permissions = ('cloudtrail:DescribeTrails',)\n embedded = False\n\n def process(self, resources, event=None):\n anded = lambda x: True # NOQA\n op = self.data.get('state', True) and anded or operator.__not__\n rcount = len(resources)\n trails = [t for t in resources if op(self.is_shadow(t))]\n if len(trails) != rcount and self.embedded:\n 
self.log.info(\"implicitly filtering shadow trails %d -> %d\",\n rcount, len(trails))\n return trails\n\n def is_shadow(self, t):\n if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:\n return True\n if t.get('IsMultiRegionTrail') and t['HomeRegion'] not in t['TrailARN']:\n return True\n\n\[email protected]_registry.register('status')\nclass Status(ValueFilter):\n \"\"\"Filter a cloudtrail by its status.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-not-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n \"\"\"\n\n schema = type_schema('status', rinherit=ValueFilter.schema)\n permissions = ('cloudtrail:GetTrailStatus',)\n annotation_key = 'c7n:TrailStatus'\n\n def process(self, resources, event=None):\n for r in resources:\n region = self.manager.config.region\n trail_arn = Arn.parse(r['TrailARN'])\n\n if (r.get('IsOrganizationTrail') and\n self.manager.config.account_id != trail_arn.account_id):\n continue\n if r.get('HomeRegion') and r['HomeRegion'] != region:\n region = trail_arn.region\n if self.annotation_key in r:\n continue\n client = local_session(self.manager.session_factory).client(\n 'cloudtrail', region_name=region)\n status = client.get_trail_status(Name=r['Name'])\n status.pop('ResponseMetadata')\n r[self.annotation_key] = status\n\n return super(Status, self).process(resources)\n\n def __call__(self, r):\n return self.match(r['c7n:TrailStatus'])\n\n\[email protected]_registry.register('update-trail')\nclass UpdateTrail(Action):\n \"\"\"Update trail attributes.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-set-log\n resource: aws.cloudtrail\n filters:\n - or:\n - KmsKeyId: empty\n - LogFileValidationEnabled: false\n actions:\n - type: update-trail\n attributes:\n KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef\n EnableLogFileValidation: true\n \"\"\"\n schema = type_schema(\n 'update-trail',\n attributes={'type': 'object'},\n required=('attributes',))\n shape = 'UpdateTrailRequest'\n permissions = ('cloudtrail:UpdateTrail',)\n\n def validate(self):\n attrs = dict(self.data['attributes'])\n if 'Name' in attrs:\n raise PolicyValidationError(\n \"Can't include Name in update-trail action\")\n attrs['Name'] = 'PolicyValidation'\n return shape_validate(\n attrs,\n self.shape,\n self.manager.resource_type.service)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n\n for r in resources:\n client.update_trail(\n Name=r['Name'],\n **self.data['attributes'])\n\n\[email protected]_registry.register('set-logging')\nclass SetLogging(Action):\n \"\"\"Set the logging state of a trail\n\n :Example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudtrail-not-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n actions:\n - type: set-logging\n enabled: True\n \"\"\"\n schema = type_schema(\n 'set-logging', enabled={'type': 'boolean'})\n\n def get_permissions(self):\n enable = self.data.get('enabled', True)\n if enable is True:\n return ('cloudtrail:StartLogging',)\n else:\n return ('cloudtrail:StopLogging',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n enable = self.data.get('enabled', True)\n\n for r in resources:\n if enable:\n client.start_logging(Name=r['Name'])\n else:\n client.stop_logging(Name=r['Name'])\n", "path": "c7n/resources/cloudtrail.py"}], "after_files": [{"content": "# Copyright 2017-2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\n\nfrom c7n.actions import Action\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import ValueFilter, Filter\nfrom c7n.manager import resources\nfrom c7n.query import QueryResourceManager\nfrom c7n.utils import local_session, type_schema\n\nfrom .aws import shape_validate, Arn\n\nlog = logging.getLogger('c7n.resources.cloudtrail')\n\n\[email protected]('cloudtrail')\nclass CloudTrail(QueryResourceManager):\n\n class resource_type(object):\n service = 'cloudtrail'\n enum_spec = ('describe_trails', 'trailList', None)\n filter_name = 'trailNameList'\n filter_type = 'list'\n arn = id = 'TrailARN'\n name = 'Name'\n dimension = None\n config_type = \"AWS::CloudTrail::Trail\"\n\n\[email protected]_registry.register('is-shadow')\nclass IsShadow(Filter):\n \"\"\"Identify shadow trails (secondary copies), shadow trails\n can't be modified directly, the origin trail needs to be modified.\n\n Shadow trails are created for multi-region trails as well for\n organizational trails.\n \"\"\"\n schema = type_schema('is-shadow', state={'type': 'boolean'})\n permissions = ('cloudtrail:DescribeTrails',)\n embedded = False\n\n def process(self, resources, event=None):\n rcount = len(resources)\n trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]\n if len(trails) != rcount and self.embedded:\n self.log.info(\"implicitly filtering shadow trails %d -> %d\",\n rcount, len(trails))\n return trails\n\n def is_shadow(self, t):\n if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:\n return True\n if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:\n return True\n return False\n\n\[email protected]_registry.register('status')\nclass Status(ValueFilter):\n \"\"\"Filter a cloudtrail by its status.\n\n :Example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudtrail-not-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n \"\"\"\n\n schema = type_schema('status', rinherit=ValueFilter.schema)\n permissions = ('cloudtrail:GetTrailStatus',)\n annotation_key = 'c7n:TrailStatus'\n\n def process(self, resources, event=None):\n for r in resources:\n region = self.manager.config.region\n trail_arn = Arn.parse(r['TrailARN'])\n\n if (r.get('IsOrganizationTrail') and\n self.manager.config.account_id != trail_arn.account_id):\n continue\n if r.get('HomeRegion') and r['HomeRegion'] != region:\n region = trail_arn.region\n if self.annotation_key in r:\n continue\n client = local_session(self.manager.session_factory).client(\n 'cloudtrail', region_name=region)\n status = client.get_trail_status(Name=r['Name'])\n status.pop('ResponseMetadata')\n r[self.annotation_key] = status\n\n return super(Status, self).process(resources)\n\n def __call__(self, r):\n return self.match(r['c7n:TrailStatus'])\n\n\[email protected]_registry.register('update-trail')\nclass UpdateTrail(Action):\n \"\"\"Update trail attributes.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-set-log\n resource: aws.cloudtrail\n filters:\n - or:\n - KmsKeyId: empty\n - LogFileValidationEnabled: false\n actions:\n - type: update-trail\n attributes:\n KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef\n EnableLogFileValidation: true\n \"\"\"\n schema = type_schema(\n 'update-trail',\n attributes={'type': 'object'},\n required=('attributes',))\n shape = 'UpdateTrailRequest'\n permissions = ('cloudtrail:UpdateTrail',)\n\n def validate(self):\n attrs = dict(self.data['attributes'])\n if 'Name' in attrs:\n raise PolicyValidationError(\n \"Can't include Name in update-trail action\")\n attrs['Name'] = 'PolicyValidation'\n return shape_validate(\n attrs,\n self.shape,\n self.manager.resource_type.service)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n\n for r in resources:\n client.update_trail(\n Name=r['Name'],\n **self.data['attributes'])\n\n\[email protected]_registry.register('set-logging')\nclass SetLogging(Action):\n \"\"\"Set the logging state of a trail\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-not-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n actions:\n - type: set-logging\n enabled: True\n \"\"\"\n schema = type_schema(\n 'set-logging', enabled={'type': 'boolean'})\n\n def get_permissions(self):\n enable = self.data.get('enabled', True)\n if enable is True:\n return ('cloudtrail:StartLogging',)\n else:\n return ('cloudtrail:StopLogging',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n enable = self.data.get('enabled', True)\n\n for r in resources:\n if enable:\n client.start_logging(Name=r['Name'])\n else:\n client.stop_logging(Name=r['Name'])\n", "path": "c7n/resources/cloudtrail.py"}]} | 2,306 | 351 |
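The cloudtrail bug is easier to see in isolation: in the original `IsShadow.process`, `anded` returns True for any argument, so with the default `state: True` every trail passes the filter, and `is_shadow` could also fall through and return None instead of False. The diff above replaces the operator trick with a direct comparison (and tightens the multi-region check against the current region). A minimal sketch of the old and new predicates, using bare booleans in place of real trail records:

```python
import operator

def old_keep(state, is_shadow_result):
    # Original logic: `anded` ignores its argument, so for state=True the
    # expression below always picks `anded` and every resource is kept.
    anded = lambda x: True
    op = state and anded or operator.__not__
    return op(is_shadow_result)

def new_keep(state, is_shadow_result):
    # Fixed logic from the diff: keep a trail only when its shadow status
    # matches the requested state.
    return is_shadow_result == state

for shadow in (True, False):
    assert old_keep(True, shadow) is True              # matches everything
    assert new_keep(True, shadow) is (shadow is True)  # matches only shadows
```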
gh_patches_debug_20804 | rasdani/github-patches | git_diff | opsdroid__opsdroid-844 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Handle slacker connector invalid token exception
<!-- Before you post an issue or if you are unsure about something join our gitter channel https://gitter.im/opsdroid/ and ask away! We are more than happy to help you. -->
# Description
This issue was found by @petri with #763
When we use an invalid token with the slacker connector, opsdroid crashes badly. This is probably because we changed dependencies and the code doesn't handle the exception raised by the aioslacker library.
We should probably refactor the connect method to check whether the exception `slacker.Error: invalid_auth` was raised; if so, an error message should be logged and opsdroid should still be able to run.
## Steps to Reproduce
- Set a slack connector with a bad token in config.yaml
- run opsdroid
## Expected Functionality
Opsdroid should still run, but a message should be logged saying that the connector won't be active due to a bad token.
## Experienced Functionality
Opsdroid crashes with a traceback
```python
Traceback (most recent call last):
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py", line 206, in <module>
init()
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py", line 203, in init
main()
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py", line 196, in main
opsdroid.load()
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/core.py", line 153, in load
self.start_connectors(self.modules["connectors"])
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/core.py", line 248, in start_connectors
self.eventloop.run_until_complete(connector.connect(self))
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py", line 466, in run_until_complete
return future.result()
File "/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/connector/slack/__init__.py", line 50, in connect
connection = await self.slacker.rtm.start()
File "/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/aioslacker/__init__.py", line 97, in __request
raise Error(response.error)
slacker.Error: invalid_auth
Exception ignored in: <bound method BaseEventLoop.__del__ of <_UnixSelectorEventLoop running=False closed=True debug=False>>
Traceback (most recent call last):
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py", line 511, in __del__
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/unix_events.py", line 65, in close
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/unix_events.py", line 146, in remove_signal_handler
File "/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/signal.py", line 47, in signal
TypeError: signal handler must be signal.SIG_IGN, signal.SIG_DFL, or a callable object
```
## Versions
- **Opsdroid version:** latest
- **Python version:** 3.6
- **OS/Docker version:** macOS Mojave
## Configuration File
Please include your version of the configuration file below.
```yaml
connectors:
- name: slack
api-token: "jdit-ksd12okr"
```
## Additional Details
Any other details you wish to include such as screenshots, console messages, etc.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/connector/slack/__init__.py`
Content:
```
1 """A connector for Slack."""
2 import logging
3 import asyncio
4 import json
5 import re
6
7 import aiohttp
8 import websockets
9 import slacker
10 from aioslacker import Slacker
11 from emoji import demojize
12
13 from opsdroid.connector import Connector
14 from opsdroid.events import Message
15
16
17 _LOGGER = logging.getLogger(__name__)
18
19
20 class ConnectorSlack(Connector):
21 """A connector for Slack."""
22
23 def __init__(self, config, opsdroid=None):
24 """Create the connector."""
25 super().__init__(config, opsdroid=opsdroid)
26 _LOGGER.debug("Starting Slack connector")
27 self.name = "slack"
28 self.config = config
29 self.default_room = config.get("default-room", "#general")
30 self.icon_emoji = config.get("icon-emoji", ':robot_face:')
31 self.token = config["api-token"]
32 self.slacker = Slacker(self.token)
33 self.websocket = None
34 self.bot_name = config.get("bot-name", 'opsdroid')
35 self.known_users = {}
36 self.keepalive = None
37 self.reconnecting = False
38 self.listening = True
39 self._message_id = 0
40
41 async def connect(self):
42 """Connect to the chat service."""
43 _LOGGER.info("Connecting to Slack")
44
45 try:
46 connection = await self.slacker.rtm.start()
47 self.websocket = await websockets.connect(connection.body['url'])
48
49 _LOGGER.debug("Connected as %s", self.bot_name)
50 _LOGGER.debug("Using icon %s", self.icon_emoji)
51 _LOGGER.debug("Default room is %s", self.default_room)
52 _LOGGER.info("Connected successfully")
53
54 if self.keepalive is None or self.keepalive.done():
55 self.keepalive = self.opsdroid.eventloop.create_task(
56 self.keepalive_websocket())
57 except aiohttp.ClientOSError as error:
58 _LOGGER.error(error)
59 _LOGGER.error("Failed to connect to Slack, retrying in 10")
60 await self.reconnect(10)
61 except Exception:
62 await self.disconnect()
63 raise
64
65 async def reconnect(self, delay=None):
66 """Reconnect to the websocket."""
67 try:
68 self.reconnecting = True
69 if delay is not None:
70 await asyncio.sleep(delay)
71 await self.connect()
72 finally:
73 self.reconnecting = False
74
75 async def disconnect(self):
76 """Disconnect from Slack."""
77 await self.slacker.close()
78
79 async def listen(self):
80 """Listen for and parse new messages."""
81 while self.listening:
82 await self.receive_from_websocket()
83
84 async def receive_from_websocket(self):
85 """Get the next message from the websocket."""
86 try:
87 content = await self.websocket.recv()
88 await self.process_message(json.loads(content))
89 except websockets.exceptions.ConnectionClosed:
90 _LOGGER.info("Slack websocket closed, reconnecting...")
91 await self.reconnect(5)
92
93 async def process_message(self, message):
94 """Process a raw message and pass it to the parser."""
95 if "type" in message and message["type"] == "message" and \
96 "user" in message:
97
98 # Ignore bot messages
99 if "subtype" in message and \
100 message["subtype"] == "bot_message":
101 return
102
103 # Lookup username
104 _LOGGER.debug("Looking up sender username")
105 try:
106 user_info = await self.lookup_username(message["user"])
107 except ValueError:
108 return
109
110 # Replace usernames in the message
111 _LOGGER.debug("Replacing userids in message with usernames")
112 message["text"] = await self.replace_usernames(
113 message["text"])
114
115 await self.opsdroid.parse(Message(user_info["name"],
116 message["channel"],
117 self,
118 message["text"],
119 raw_event=message))
120
121 async def respond(self, message, room=None):
122 """Respond with a message."""
123 _LOGGER.debug("Responding with: '%s' in room %s",
124 message.text, message.room)
125 await self.slacker.chat.post_message(message.room,
126 message.text,
127 as_user=False,
128 username=self.bot_name,
129 icon_emoji=self.icon_emoji)
130
131 async def react(self, message, emoji):
132 """React to a message."""
133 emoji = demojize(emoji)
134 _LOGGER.debug("Reacting with: %s", emoji)
135 try:
136 await self.slacker.reactions.post('reactions.add', data={
137 'name': emoji,
138 'channel': message.room,
139 'timestamp': message.raw_event['ts']
140 })
141 except slacker.Error as error:
142 if str(error) == 'invalid_name':
143 _LOGGER.warning('Slack does not support the emoji %s', emoji)
144 else:
145 raise
146
147 async def keepalive_websocket(self):
148 """Keep pinging the websocket to keep it alive."""
149 while self.listening:
150 await self.ping_websocket()
151
152 async def ping_websocket(self):
153 """Ping the websocket."""
154 await asyncio.sleep(60)
155 self._message_id += 1
156 try:
157 await self.websocket.send(
158 json.dumps({'id': self._message_id, 'type': 'ping'}))
159 except (websockets.exceptions.InvalidState,
160 websockets.exceptions.ConnectionClosed,
161 aiohttp.ClientOSError,
162 TimeoutError):
163 _LOGGER.info("Slack websocket closed, reconnecting...")
164 if not self.reconnecting:
165 await self.reconnect()
166
167 async def lookup_username(self, userid):
168 """Lookup a username and cache it."""
169 if userid in self.known_users:
170 user_info = self.known_users[userid]
171 else:
172 response = await self.slacker.users.info(userid)
173 user_info = response.body["user"]
174 if isinstance(user_info, dict):
175 self.known_users[userid] = user_info
176 else:
177 raise ValueError("Returned user is not a dict.")
178 return user_info
179
180 async def replace_usernames(self, message):
181 """Replace User ID with username in message text."""
182 userids = re.findall(r"\<\@([A-Z0-9]+)(?:\|.+)?\>", message)
183 for userid in userids:
184 user_info = await self.lookup_username(userid)
185 message = message.replace("<@{userid}>".format(userid=userid),
186 user_info["name"])
187 return message
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py
--- a/opsdroid/connector/slack/__init__.py
+++ b/opsdroid/connector/slack/__init__.py
@@ -58,6 +58,9 @@
_LOGGER.error(error)
_LOGGER.error("Failed to connect to Slack, retrying in 10")
await self.reconnect(10)
+ except slacker.Error as error:
+ _LOGGER.error("Unable to connect to Slack due to %s - "
+ "The Slack Connector will not be available.", error)
except Exception:
await self.disconnect()
raise
@@ -79,7 +82,10 @@
async def listen(self):
"""Listen for and parse new messages."""
while self.listening:
- await self.receive_from_websocket()
+ try:
+ await self.receive_from_websocket()
+ except AttributeError:
+ break
async def receive_from_websocket(self):
"""Get the next message from the websocket."""
| {"golden_diff": "diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py\n--- a/opsdroid/connector/slack/__init__.py\n+++ b/opsdroid/connector/slack/__init__.py\n@@ -58,6 +58,9 @@\n _LOGGER.error(error)\n _LOGGER.error(\"Failed to connect to Slack, retrying in 10\")\n await self.reconnect(10)\n+ except slacker.Error as error:\n+ _LOGGER.error(\"Unable to connect to Slack due to %s - \"\n+ \"The Slack Connector will not be available.\", error)\n except Exception:\n await self.disconnect()\n raise\n@@ -79,7 +82,10 @@\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n while self.listening:\n- await self.receive_from_websocket()\n+ try:\n+ await self.receive_from_websocket()\n+ except AttributeError:\n+ break\n \n async def receive_from_websocket(self):\n \"\"\"Get the next message from the websocket.\"\"\"\n", "issue": "Handle slacker connector invalid token exception\n<!-- Before you post an issue or if you are unsure about something join our gitter channel https://gitter.im/opsdroid/ and ask away! We are more than happy to help you. -->\r\n# Description\r\nThis issue was found by @petri with #763\r\n\r\nWhen we use an invalid token with the slacker connector opsdroid crashes badly. This is probably due to the fact that we changed dependencies and the code doesn't handle the exception raised by the aioslacker library.\r\n\r\nWe should probably refactor the connect method to check if the exception `slacker.Error: invalid_auth` was raised, if so an error message should be logged and opsdroid should still be able to run.\r\n\r\n\r\n## Steps to Reproduce\r\n- Set a slack connector with a bad token on config.yaml\r\n- run opsdroid\r\n\r\n\r\n## Expected Functionality\r\nOpsdroid should still run but a message should be logged that the connector won't be active due to bad token.\r\n\r\n\r\n## Experienced Functionality\r\nOpsdroid crashes with a traceback\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\r\n \"__main__\", mod_spec)\r\n File \"/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/runpy.py\", line 85, in _run_code\r\n exec(code, run_globals)\r\n File \"/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py\", line 206, in <module>\r\n init()\r\n File \"/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py\", line 203, in init\r\n main()\r\n File \"/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/click/core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/__main__.py\", line 196, in main\r\n opsdroid.load()\r\n File \"/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/core.py\", line 153, in load\r\n self.start_connectors(self.modules[\"connectors\"])\r\n File 
\"/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/core.py\", line 248, in start_connectors\r\n self.eventloop.run_until_complete(connector.connect(self))\r\n File \"/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py\", line 466, in run_until_complete\r\n return future.result()\r\n File \"/Users/fabiorosado/Documents/GitHub/opsdroid/opsdroid/connector/slack/__init__.py\", line 50, in connect\r\n connection = await self.slacker.rtm.start()\r\n File \"/Users/fabiorosado/.local/share/virtualenvs/opsdroid-13bLHlYD/lib/python3.6/site-packages/aioslacker/__init__.py\", line 97, in __request\r\n raise Error(response.error)\r\nslacker.Error: invalid_auth\r\nException ignored in: <bound method BaseEventLoop.__del__ of <_UnixSelectorEventLoop running=False closed=True debug=False>>\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py\", line 511, in __del__\r\n File \"/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/unix_events.py\", line 65, in close\r\n File \"/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/unix_events.py\", line 146, in remove_signal_handler\r\n File \"/usr/local/Cellar/python3/3.6.1/Frameworks/Python.framework/Versions/3.6/lib/python3.6/signal.py\", line 47, in signal\r\nTypeError: signal handler must be signal.SIG_IGN, signal.SIG_DFL, or a callable object\r\n```\r\n\r\n## Versions\r\n- **Opsdroid version:** latest\r\n- **Python version:** 3.6\r\n- **OS/Docker version:** MacOs Mojave\r\n\r\n## Configuration File\r\nPlease include your version of the configuration file bellow.\r\n\r\n```yaml\r\nconnectors:\r\n - name: slack\r\n api-token: \"jdit-ksd12okr\"\r\n```\r\n\r\n## Additional Details\r\nAny other details you wish to include such as screenshots, console messages, etc.\r\n\r\n\r\n<!-- Love opsdroid? 
Please consider supporting our collective:\r\n +\ud83d\udc49 https://opencollective.com/opsdroid/donate -->\n", "before_files": [{"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport asyncio\nimport json\nimport re\n\nimport aiohttp\nimport websockets\nimport slacker\nfrom aioslacker import Slacker\nfrom emoji import demojize\n\nfrom opsdroid.connector import Connector\nfrom opsdroid.events import Message\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(\"Starting Slack connector\")\n self.name = \"slack\"\n self.config = config\n self.default_room = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", ':robot_face:')\n self.token = config[\"api-token\"]\n self.slacker = Slacker(self.token)\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", 'opsdroid')\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(\"Connecting to Slack\")\n\n try:\n connection = await self.slacker.rtm.start()\n self.websocket = await websockets.connect(connection.body['url'])\n\n _LOGGER.debug(\"Connected as %s\", self.bot_name)\n _LOGGER.debug(\"Using icon %s\", self.icon_emoji)\n _LOGGER.debug(\"Default room is %s\", self.default_room)\n _LOGGER.info(\"Connected successfully\")\n\n if self.keepalive is None or self.keepalive.done():\n self.keepalive = self.opsdroid.eventloop.create_task(\n self.keepalive_websocket())\n except aiohttp.ClientOSError as error:\n _LOGGER.error(error)\n _LOGGER.error(\"Failed to connect to Slack, retrying in 10\")\n await self.reconnect(10)\n except Exception:\n await self.disconnect()\n raise\n\n async def reconnect(self, delay=None):\n \"\"\"Reconnect to the websocket.\"\"\"\n try:\n self.reconnecting = True\n if delay is not None:\n await asyncio.sleep(delay)\n await self.connect()\n finally:\n self.reconnecting = False\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n await self.slacker.close()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n while self.listening:\n await self.receive_from_websocket()\n\n async def receive_from_websocket(self):\n \"\"\"Get the next message from the websocket.\"\"\"\n try:\n content = await self.websocket.recv()\n await self.process_message(json.loads(content))\n except websockets.exceptions.ConnectionClosed:\n _LOGGER.info(\"Slack websocket closed, reconnecting...\")\n await self.reconnect(5)\n\n async def process_message(self, message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n if \"type\" in message and message[\"type\"] == \"message\" and \\\n \"user\" in message:\n\n # Ignore bot messages\n if \"subtype\" in message and \\\n message[\"subtype\"] == \"bot_message\":\n return\n\n # Lookup username\n _LOGGER.debug(\"Looking up sender username\")\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(\"Replacing userids in message with usernames\")\n message[\"text\"] = await self.replace_usernames(\n message[\"text\"])\n\n await self.opsdroid.parse(Message(user_info[\"name\"],\n message[\"channel\"],\n self,\n message[\"text\"],\n 
raw_event=message))\n\n async def respond(self, message, room=None):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\"Responding with: '%s' in room %s\",\n message.text, message.room)\n await self.slacker.chat.post_message(message.room,\n message.text,\n as_user=False,\n username=self.bot_name,\n icon_emoji=self.icon_emoji)\n\n async def react(self, message, emoji):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(emoji)\n _LOGGER.debug(\"Reacting with: %s\", emoji)\n try:\n await self.slacker.reactions.post('reactions.add', data={\n 'name': emoji,\n 'channel': message.room,\n 'timestamp': message.raw_event['ts']\n })\n except slacker.Error as error:\n if str(error) == 'invalid_name':\n _LOGGER.warning('Slack does not support the emoji %s', emoji)\n else:\n raise\n\n async def keepalive_websocket(self):\n \"\"\"Keep pinging the websocket to keep it alive.\"\"\"\n while self.listening:\n await self.ping_websocket()\n\n async def ping_websocket(self):\n \"\"\"Ping the websocket.\"\"\"\n await asyncio.sleep(60)\n self._message_id += 1\n try:\n await self.websocket.send(\n json.dumps({'id': self._message_id, 'type': 'ping'}))\n except (websockets.exceptions.InvalidState,\n websockets.exceptions.ConnectionClosed,\n aiohttp.ClientOSError,\n TimeoutError):\n _LOGGER.info(\"Slack websocket closed, reconnecting...\")\n if not self.reconnecting:\n await self.reconnect()\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n response = await self.slacker.users.info(userid)\n user_info = response.body[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\"<@{userid}>\".format(userid=userid),\n user_info[\"name\"])\n return message\n", "path": "opsdroid/connector/slack/__init__.py"}], "after_files": [{"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport asyncio\nimport json\nimport re\n\nimport aiohttp\nimport websockets\nimport slacker\nfrom aioslacker import Slacker\nfrom emoji import demojize\n\nfrom opsdroid.connector import Connector\nfrom opsdroid.events import Message\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(\"Starting Slack connector\")\n self.name = \"slack\"\n self.config = config\n self.default_room = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", ':robot_face:')\n self.token = config[\"api-token\"]\n self.slacker = Slacker(self.token)\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", 'opsdroid')\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(\"Connecting to Slack\")\n\n try:\n connection = await self.slacker.rtm.start()\n self.websocket = await websockets.connect(connection.body['url'])\n\n _LOGGER.debug(\"Connected as %s\", 
self.bot_name)\n _LOGGER.debug(\"Using icon %s\", self.icon_emoji)\n _LOGGER.debug(\"Default room is %s\", self.default_room)\n _LOGGER.info(\"Connected successfully\")\n\n if self.keepalive is None or self.keepalive.done():\n self.keepalive = self.opsdroid.eventloop.create_task(\n self.keepalive_websocket())\n except aiohttp.ClientOSError as error:\n _LOGGER.error(error)\n _LOGGER.error(\"Failed to connect to Slack, retrying in 10\")\n await self.reconnect(10)\n except slacker.Error as error:\n _LOGGER.error(\"Unable to connect to Slack due to %s - \"\n \"The Slack Connector will not be available.\", error)\n except Exception:\n await self.disconnect()\n raise\n\n async def reconnect(self, delay=None):\n \"\"\"Reconnect to the websocket.\"\"\"\n try:\n self.reconnecting = True\n if delay is not None:\n await asyncio.sleep(delay)\n await self.connect()\n finally:\n self.reconnecting = False\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n await self.slacker.close()\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n while self.listening:\n try:\n await self.receive_from_websocket()\n except AttributeError:\n break\n\n async def receive_from_websocket(self):\n \"\"\"Get the next message from the websocket.\"\"\"\n try:\n content = await self.websocket.recv()\n await self.process_message(json.loads(content))\n except websockets.exceptions.ConnectionClosed:\n _LOGGER.info(\"Slack websocket closed, reconnecting...\")\n await self.reconnect(5)\n\n async def process_message(self, message):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n if \"type\" in message and message[\"type\"] == \"message\" and \\\n \"user\" in message:\n\n # Ignore bot messages\n if \"subtype\" in message and \\\n message[\"subtype\"] == \"bot_message\":\n return\n\n # Lookup username\n _LOGGER.debug(\"Looking up sender username\")\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(\"Replacing userids in message with usernames\")\n message[\"text\"] = await self.replace_usernames(\n message[\"text\"])\n\n await self.opsdroid.parse(Message(user_info[\"name\"],\n message[\"channel\"],\n self,\n message[\"text\"],\n raw_event=message))\n\n async def respond(self, message, room=None):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\"Responding with: '%s' in room %s\",\n message.text, message.room)\n await self.slacker.chat.post_message(message.room,\n message.text,\n as_user=False,\n username=self.bot_name,\n icon_emoji=self.icon_emoji)\n\n async def react(self, message, emoji):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(emoji)\n _LOGGER.debug(\"Reacting with: %s\", emoji)\n try:\n await self.slacker.reactions.post('reactions.add', data={\n 'name': emoji,\n 'channel': message.room,\n 'timestamp': message.raw_event['ts']\n })\n except slacker.Error as error:\n if str(error) == 'invalid_name':\n _LOGGER.warning('Slack does not support the emoji %s', emoji)\n else:\n raise\n\n async def keepalive_websocket(self):\n \"\"\"Keep pinging the websocket to keep it alive.\"\"\"\n while self.listening:\n await self.ping_websocket()\n\n async def ping_websocket(self):\n \"\"\"Ping the websocket.\"\"\"\n await asyncio.sleep(60)\n self._message_id += 1\n try:\n await self.websocket.send(\n json.dumps({'id': self._message_id, 'type': 'ping'}))\n except (websockets.exceptions.InvalidState,\n websockets.exceptions.ConnectionClosed,\n aiohttp.ClientOSError,\n 
TimeoutError):\n _LOGGER.info(\"Slack websocket closed, reconnecting...\")\n if not self.reconnecting:\n await self.reconnect()\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n response = await self.slacker.users.info(userid)\n user_info = response.body[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\"<@{userid}>\".format(userid=userid),\n user_info[\"name\"])\n return message\n", "path": "opsdroid/connector/slack/__init__.py"}]} | 3,421 | 240 |
gh_patches_debug_17137 | rasdani/github-patches | git_diff | falconry__falcon-2104 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clarify that `TestClient` does not use the app's JSON handlers
As I'm writing pytest tests for my ASGI app, I'm using the TestClient as a fixture as described in the docs:
```python
@pytest.fixture
def falcon_test_client():
return TestClient(get_app())
```
`get_app()` is a function that creates and configures my app, including some custom JSON handlers (which, as you can see below, I also use for `application/x-www-form-urlencoded`):
```python
def get_app():
app = App()
json_handler = JSONHandler(
dumps=partial(json.dumps, cls=CustomJsonEncoder),
loads=partial(json.loads, cls=CustomJsonEncoder),
)
extra_handlers = {
falcon.MEDIA_JSON: json_handler,
falcon.MEDIA_URLENCODED: json_handler,
}
app.req_options.media_handlers.update(extra_handlers)
app.resp_options.media_handlers.update(extra_handlers)
# ... routes ...
return app
```
`CustomJsonEncoder` is an encoder that can handle `UUID` and `datetime` objects; testing it with `json.dumps()` works so I know the problem is not there.
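A sketch of an encoder along those lines (the actual `CustomJsonEncoder` isn't reproduced here, so treat this as an approximation):

```python
import json
import uuid
from datetime import datetime


class CustomJsonEncoder(json.JSONEncoder):
    """json.JSONEncoder that also understands UUID and datetime values."""

    def default(self, obj):
        if isinstance(obj, uuid.UUID):
            return str(obj)
        if isinstance(obj, datetime):
            return obj.isoformat()
        return super().default(obj)
```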
The problem is that when I try to give some data that includes a `UUID` to `simulate_post()`, it fails with `TypeError: Object of type UUID is not JSON serializable`, and I can see from pytest's traceback that at the moment of the exception, `self` is `json.encoder.JSONEncoder`, namely not the custom encoder I defined.
This is the simple test I'm trying this with:
```python
def test_(falcon_test_client):
response = falcon_test_client.simulate_post(
"/my/endpoint",
json={
"data": {}, # Just a dict with str keys and a UUID as value
},
)
```
I'm either forgetting something or I got something wrong, but I'm not sure what and how.
Also, I was rather confused when I was looking for ways to simulate and receive `x-www-form-urlencoded` data: if I set the `content_type` accordingly in something like `simulate_post()` and put the data in `body` as a dict¹, `req.get_media()` in the route handler seems to receive it as plain text.
If I put the data in the `json` parameter, as stated in the docs, the `content_type` is forced to `application/json`, making my test subtly different from reality. Since there doesn't seem to be a way to avoid this behaviour, I'd rather use an alternative that keeps the content type intact. How should I handle this?
¹ I know that that is basically JSON, but my app needs to receive webhooks from an app which sends JSON-like data as `x-www-form-urlencoded`.
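A workaround sketch for both points (serialize the payload yourself with the app's encoder and pass it via `body` with an explicit content type, so the simulated request neither goes through the stdlib-only `json=` path nor has its content type overridden):

```python
import json

import falcon


def simulate_post_with_encoder(client, path, payload, encoder_cls,
                               content_type=falcon.MEDIA_JSON):
    # TestClient's json= parameter always uses the stdlib encoder, so dump
    # the payload ourselves and send it as a plain request body instead.
    body = json.dumps(payload, cls=encoder_cls)
    return client.simulate_post(path, body=body,
                                headers={'Content-Type': content_type})
```

In the failing test above this would be called as `simulate_post_with_encoder(falcon_test_client, "/my/endpoint", {"data": {}}, CustomJsonEncoder)`, and because the app registers the JSON handler for `application/x-www-form-urlencoded` as well, passing `falcon.MEDIA_URLENCODED` as `content_type` exercises that path with the header left intact.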
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `falcon/media/json.py`
Content:
```
1 from functools import partial
2 import json
3
4 from falcon import errors
5 from falcon import http_error
6 from falcon.media.base import BaseHandler
7 from falcon.media.base import TextBaseHandlerWS
8
9
10 class JSONHandler(BaseHandler):
11 """JSON media handler.
12
13 This handler uses Python's standard :py:mod:`json` library by default, but
14 can be easily configured to use any of a number of third-party JSON
15 libraries, depending on your needs. For example, you can often
16 realize a significant performance boost under CPython by using an
17 alternative library. Good options in this respect include `orjson`,
18 `python-rapidjson`, and `mujson`.
19
20 This handler will raise a :class:`falcon.MediaNotFoundError` when attempting
21 to parse an empty body, or a :class:`falcon.MediaMalformedError`
22 if an error happens while parsing the body.
23
24 Note:
25 If you are deploying to PyPy, we recommend sticking with the standard
26 library's JSON implementation, since it will be faster in most cases
27 as compared to a third-party library.
28
29 .. rubric:: Custom JSON library
30
31 You can replace the default JSON handler by using a custom JSON library
32 (see also: :ref:`custom_media_handlers`). Overriding the default JSON
33 implementation is simply a matter of specifying the desired ``dumps`` and
34 ``loads`` functions::
35
36 import falcon
37 from falcon import media
38
39 import rapidjson
40
41 json_handler = media.JSONHandler(
42 dumps=rapidjson.dumps,
43 loads=rapidjson.loads,
44 )
45 extra_handlers = {
46 'application/json': json_handler,
47 }
48
49 app = falcon.App()
50 app.req_options.media_handlers.update(extra_handlers)
51 app.resp_options.media_handlers.update(extra_handlers)
52
53 .. rubric:: Custom serialization parameters
54
55 Even if you decide to stick with the stdlib's :any:`json.dumps` and
56 :any:`json.loads`, you can wrap them using :any:`functools.partial` to
57 provide custom serialization or deserialization parameters supported by the
58 ``dumps`` and ``loads`` functions, respectively
59 (see also: :ref:`prettifying-json-responses`)::
60
61 import falcon
62 from falcon import media
63
64 from functools import partial
65
66 json_handler = media.JSONHandler(
67 dumps=partial(
68 json.dumps,
69 default=str,
70 sort_keys=True,
71 ),
72 )
73 extra_handlers = {
74 'application/json': json_handler,
75 }
76
77 app = falcon.App()
78 app.req_options.media_handlers.update(extra_handlers)
79 app.resp_options.media_handlers.update(extra_handlers)
80
81 By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.
82 If you override the ``dumps`` function, you will need to explicitly set
83 ``ensure_ascii`` to ``False`` in order to enable the serialization of
84 Unicode characters to UTF-8. This is easily done by using
85 :any:`functools.partial` to apply the desired keyword argument. As also
86 demonstrated in the previous paragraph, you can use this same technique to
87 customize any option supported by the ``dumps`` and ``loads`` functions::
88
89 from functools import partial
90
91 from falcon import media
92 import rapidjson
93
94 json_handler = media.JSONHandler(
95 dumps=partial(
96 rapidjson.dumps,
97 ensure_ascii=False, sort_keys=True
98 ),
99 )
100
101 .. _custom-media-json-encoder:
102
103 .. rubric:: Custom JSON encoder
104
105 You can also override the default :class:`~json.JSONEncoder` by using a
106 custom Encoder and updating the media handlers for ``application/json``
107 type to use that::
108
109 import json
110 from datetime import datetime
111 from functools import partial
112
113 import falcon
114 from falcon import media
115
116 class DatetimeEncoder(json.JSONEncoder):
117 \"\"\"Json Encoder that supports datetime objects.\"\"\"
118
119 def default(self, obj):
120 if isinstance(obj, datetime):
121 return obj.isoformat()
122 return super().default(obj)
123
124 app = falcon.App()
125
126 json_handler = media.JSONHandler(
127 dumps=partial(json.dumps, cls=DatetimeEncoder),
128 )
129 extra_handlers = {
130 'application/json': json_handler,
131 }
132
133 app.req_options.media_handlers.update(extra_handlers)
134 app.resp_options.media_handlers.update(extra_handlers)
135
136
137 Keyword Arguments:
138 dumps (func): Function to use when serializing JSON responses.
139 loads (func): Function to use when deserializing JSON requests.
140 """
141
142 def __init__(self, dumps=None, loads=None):
143 self._dumps = dumps or partial(json.dumps, ensure_ascii=False)
144 self._loads = loads or json.loads
145
146 # PERF(kgriffs): Test dumps once up front so we can set the
147 # proper serialize implementation.
148 result = self._dumps({'message': 'Hello World'})
149 if isinstance(result, str):
150 self.serialize = self._serialize_s
151 self.serialize_async = self._serialize_async_s
152 else:
153 self.serialize = self._serialize_b
154 self.serialize_async = self._serialize_async_b
155
156 # NOTE(kgriffs): To be safe, only enable the optimized protocol when
157 # not subclassed.
158 if type(self) is JSONHandler:
159 self._serialize_sync = self.serialize
160 self._deserialize_sync = self._deserialize
161
162 def _deserialize(self, data):
163 if not data:
164 raise errors.MediaNotFoundError('JSON')
165 try:
166 return self._loads(data.decode())
167 except ValueError as err:
168 raise errors.MediaMalformedError('JSON') from err
169
170 def deserialize(self, stream, content_type, content_length):
171 return self._deserialize(stream.read())
172
173 async def deserialize_async(self, stream, content_type, content_length):
174 return self._deserialize(await stream.read())
175
176 # NOTE(kgriffs): Make content_type a kwarg to support the
177 # Request.render_body() shortcut optimization.
178 def _serialize_s(self, media, content_type=None) -> bytes:
179 return self._dumps(media).encode()
180
181 async def _serialize_async_s(self, media, content_type) -> bytes:
182 return self._dumps(media).encode()
183
184 def _serialize_b(self, media, content_type) -> bytes:
185 return self._dumps(media)
186
187 async def _serialize_async_b(self, media, content_type) -> bytes:
188 return self._dumps(media)
189
190
191 class JSONHandlerWS(TextBaseHandlerWS):
192 """WebSocket media handler for de(serializing) JSON to/from TEXT payloads.
193
194 This handler uses Python's standard :py:mod:`json` library by default, but
195 can be easily configured to use any of a number of third-party JSON
196 libraries, depending on your needs. For example, you can often
197 realize a significant performance boost under CPython by using an
198 alternative library. Good options in this respect include `orjson`,
199 `python-rapidjson`, and `mujson`.
200
201 Note:
202 If you are deploying to PyPy, we recommend sticking with the standard
203 library's JSON implementation, since it will be faster in most cases
204 as compared to a third-party library.
205
206 Overriding the default JSON implementation is simply a matter of specifying
207 the desired ``dumps`` and ``loads`` functions::
208
209 import falcon
210 from falcon import media
211
212 import rapidjson
213
214 json_handler = media.JSONHandlerWS(
215 dumps=rapidjson.dumps,
216 loads=rapidjson.loads,
217 )
218
219 app = falcon.asgi.App()
220 app.ws_options.media_handlers[falcon.WebSocketPayloadType.TEXT] = json_handler
221
222 By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.
223 If you override the ``dumps`` function, you will need to explicitly set
224 ``ensure_ascii`` to ``False`` in order to enable the serialization of
225 Unicode characters to UTF-8. This is easily done by using
226 :any:`functools.partial` to apply the desired keyword argument. In fact, you
227 can use this same technique to customize any option supported by the
228 ``dumps`` and ``loads`` functions::
229
230 from functools import partial
231
232 from falcon import media
233 import rapidjson
234
235 json_handler = media.JSONHandlerWS(
236 dumps=partial(
237 rapidjson.dumps,
238 ensure_ascii=False, sort_keys=True
239 ),
240 )
241
242 Keyword Arguments:
243 dumps (func): Function to use when serializing JSON.
244 loads (func): Function to use when deserializing JSON.
245 """
246
247 __slots__ = ['dumps', 'loads']
248
249 def __init__(self, dumps=None, loads=None):
250 self._dumps = dumps or partial(json.dumps, ensure_ascii=False)
251 self._loads = loads or json.loads
252
253 def serialize(self, media: object) -> str:
254 return self._dumps(media)
255
256 def deserialize(self, payload: str) -> object:
257 return self._loads(payload)
258
259
260 http_error._DEFAULT_JSON_HANDLER = _DEFAULT_JSON_HANDLER = JSONHandler() # type: ignore
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/falcon/media/json.py b/falcon/media/json.py
--- a/falcon/media/json.py
+++ b/falcon/media/json.py
@@ -133,6 +133,15 @@
app.req_options.media_handlers.update(extra_handlers)
app.resp_options.media_handlers.update(extra_handlers)
+ .. note:: When testing an application employing a custom JSON encoder, bear
+ in mind that :class:`~.testing.TestClient` is decoupled from the app,
+ and it simulates requests as if they were performed by a third-party
+ client (just sans network). Therefore, passing the **json** parameter
+ to :ref:`simulate_* <testing_standalone_methods>` methods will
+ effectively use the stdlib's :func:`json.dumps`. If you want to
+ serialize custom objects for testing, you will need to dump them into a
+ string yourself, and pass it using the **body** parameter instead
+ (accompanied by the ``application/json`` content type header).
Keyword Arguments:
dumps (func): Function to use when serializing JSON responses.
| {"golden_diff": "diff --git a/falcon/media/json.py b/falcon/media/json.py\n--- a/falcon/media/json.py\n+++ b/falcon/media/json.py\n@@ -133,6 +133,15 @@\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n \n+ .. note:: When testing an application employing a custom JSON encoder, bear\n+ in mind that :class:`~.testing.TestClient` is decoupled from the app,\n+ and it simulates requests as if they were performed by a third-party\n+ client (just sans network). Therefore, passing the **json** parameter\n+ to :ref:`simulate_* <testing_standalone_methods>` methods will\n+ effectively use the stdlib's :func:`json.dumps`. If you want to\n+ serialize custom objects for testing, you will need to dump them into a\n+ string yourself, and pass it using the **body** parameter instead\n+ (accompanied by the ``application/json`` content type header).\n \n Keyword Arguments:\n dumps (func): Function to use when serializing JSON responses.\n", "issue": "Clarify that `TestClient` does not use the app's JSON handlers\nAs I'm writing pytest tests for my ASGI app, I'm using the TestClient as a fixture as described in the docs:\r\n\r\n```python\r\[email protected]\r\ndef falcon_test_client():\r\n return TestClient(get_app())\r\n```\r\n\r\n`get_app()` is a function that creates and configures my app including some custom Json handlers (which as you can see below I also use for `application/x-www-form-urlencoded`):\r\n```python\r\ndef get_app():\r\n app = App()\r\n\r\n json_handler = JSONHandler(\r\n dumps=partial(json.dumps, cls=CustomJsonEncoder),\r\n loads=partial(json.loads, cls=CustomJsonEncoder),\r\n )\r\n\r\n extra_handlers = {\r\n falcon.MEDIA_JSON: json_handler,\r\n falcon.MEDIA_URLENCODED: json_handler,\r\n }\r\n\r\n app.req_options.media_handlers.update(extra_handlers)\r\n app.resp_options.media_handlers.update(extra_handlers)\r\n\r\n # ... routes ...\r\n\r\n return app\r\n```\r\n\r\n`CustomJsonEncoder` is an encoder that can handle `UUID` and `datetime` objects; testing it with `json.dumps()` works so I know the problem is not there.\r\n\r\nThe problem is that when I try to give some data that includes a `UUID` to `simulate_post()`, it fails with `TypeError: Object of type UUID is not JSON serializable`, and I can see from pytest's traceback that at the moment of the exception, `self` is `json.encoder.JSONEncoder`, namely not the custom encoder I defined.\r\n\r\nThis is the simple test I'm trying this with:\r\n\r\n```python\r\ndef test_(falcon_test_client):\r\n response = falcon_test_client.simulate_post(\r\n \"/my/endpoint\",\r\n json={\r\n \"data\": {}, # Just a dict with str keys and a UUID as value\r\n },\r\n )\r\n```\r\n\r\nI'm either forgetting something or I got something wrong, but I'm not sure what and how.\r\n\r\nAlso, I was rather confused when I was looking for ways to simulate and receive `x-www-form-urlencoded` data: if I set the `content_type` accordingly in something like `simulate_post()` and put the data in `body` as a dict\u00b9, `req.get_media()` in the route handler seems to receive it as plain text. \r\nIf I put the data in the `json` parameter, as stated in the docs, the `content_type` is forced to `application/json`, making my test subtly different from reality, and since there doesn't seem to be a way to avoid this behaviour, I'd rather use an alternative that keeps the content type intact. 
How should I handle this?\r\n\r\n\u00b9 I know that that is basically JSON, but my app needs to receive webhooks from an app which sends JSON-like data as `x-www-form-urlencoded`.\n", "before_files": [{"content": "from functools import partial\nimport json\n\nfrom falcon import errors\nfrom falcon import http_error\nfrom falcon.media.base import BaseHandler\nfrom falcon.media.base import TextBaseHandlerWS\n\n\nclass JSONHandler(BaseHandler):\n \"\"\"JSON media handler.\n\n This handler uses Python's standard :py:mod:`json` library by default, but\n can be easily configured to use any of a number of third-party JSON\n libraries, depending on your needs. For example, you can often\n realize a significant performance boost under CPython by using an\n alternative library. Good options in this respect include `orjson`,\n `python-rapidjson`, and `mujson`.\n\n This handler will raise a :class:`falcon.MediaNotFoundError` when attempting\n to parse an empty body, or a :class:`falcon.MediaMalformedError`\n if an error happens while parsing the body.\n\n Note:\n If you are deploying to PyPy, we recommend sticking with the standard\n library's JSON implementation, since it will be faster in most cases\n as compared to a third-party library.\n\n .. rubric:: Custom JSON library\n\n You can replace the default JSON handler by using a custom JSON library\n (see also: :ref:`custom_media_handlers`). Overriding the default JSON\n implementation is simply a matter of specifying the desired ``dumps`` and\n ``loads`` functions::\n\n import falcon\n from falcon import media\n\n import rapidjson\n\n json_handler = media.JSONHandler(\n dumps=rapidjson.dumps,\n loads=rapidjson.loads,\n )\n extra_handlers = {\n 'application/json': json_handler,\n }\n\n app = falcon.App()\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n .. rubric:: Custom serialization parameters\n\n Even if you decide to stick with the stdlib's :any:`json.dumps` and\n :any:`json.loads`, you can wrap them using :any:`functools.partial` to\n provide custom serialization or deserialization parameters supported by the\n ``dumps`` and ``loads`` functions, respectively\n (see also: :ref:`prettifying-json-responses`)::\n\n import falcon\n from falcon import media\n\n from functools import partial\n\n json_handler = media.JSONHandler(\n dumps=partial(\n json.dumps,\n default=str,\n sort_keys=True,\n ),\n )\n extra_handlers = {\n 'application/json': json_handler,\n }\n\n app = falcon.App()\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.\n If you override the ``dumps`` function, you will need to explicitly set\n ``ensure_ascii`` to ``False`` in order to enable the serialization of\n Unicode characters to UTF-8. This is easily done by using\n :any:`functools.partial` to apply the desired keyword argument. As also\n demonstrated in the previous paragraph, you can use this same technique to\n customize any option supported by the ``dumps`` and ``loads`` functions::\n\n from functools import partial\n\n from falcon import media\n import rapidjson\n\n json_handler = media.JSONHandler(\n dumps=partial(\n rapidjson.dumps,\n ensure_ascii=False, sort_keys=True\n ),\n )\n\n .. _custom-media-json-encoder:\n\n .. 
rubric:: Custom JSON encoder\n\n You can also override the default :class:`~json.JSONEncoder` by using a\n custom Encoder and updating the media handlers for ``application/json``\n type to use that::\n\n import json\n from datetime import datetime\n from functools import partial\n\n import falcon\n from falcon import media\n\n class DatetimeEncoder(json.JSONEncoder):\n \\\"\\\"\\\"Json Encoder that supports datetime objects.\\\"\\\"\\\"\n\n def default(self, obj):\n if isinstance(obj, datetime):\n return obj.isoformat()\n return super().default(obj)\n\n app = falcon.App()\n\n json_handler = media.JSONHandler(\n dumps=partial(json.dumps, cls=DatetimeEncoder),\n )\n extra_handlers = {\n 'application/json': json_handler,\n }\n\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n\n Keyword Arguments:\n dumps (func): Function to use when serializing JSON responses.\n loads (func): Function to use when deserializing JSON requests.\n \"\"\"\n\n def __init__(self, dumps=None, loads=None):\n self._dumps = dumps or partial(json.dumps, ensure_ascii=False)\n self._loads = loads or json.loads\n\n # PERF(kgriffs): Test dumps once up front so we can set the\n # proper serialize implementation.\n result = self._dumps({'message': 'Hello World'})\n if isinstance(result, str):\n self.serialize = self._serialize_s\n self.serialize_async = self._serialize_async_s\n else:\n self.serialize = self._serialize_b\n self.serialize_async = self._serialize_async_b\n\n # NOTE(kgriffs): To be safe, only enable the optimized protocol when\n # not subclassed.\n if type(self) is JSONHandler:\n self._serialize_sync = self.serialize\n self._deserialize_sync = self._deserialize\n\n def _deserialize(self, data):\n if not data:\n raise errors.MediaNotFoundError('JSON')\n try:\n return self._loads(data.decode())\n except ValueError as err:\n raise errors.MediaMalformedError('JSON') from err\n\n def deserialize(self, stream, content_type, content_length):\n return self._deserialize(stream.read())\n\n async def deserialize_async(self, stream, content_type, content_length):\n return self._deserialize(await stream.read())\n\n # NOTE(kgriffs): Make content_type a kwarg to support the\n # Request.render_body() shortcut optimization.\n def _serialize_s(self, media, content_type=None) -> bytes:\n return self._dumps(media).encode()\n\n async def _serialize_async_s(self, media, content_type) -> bytes:\n return self._dumps(media).encode()\n\n def _serialize_b(self, media, content_type) -> bytes:\n return self._dumps(media)\n\n async def _serialize_async_b(self, media, content_type) -> bytes:\n return self._dumps(media)\n\n\nclass JSONHandlerWS(TextBaseHandlerWS):\n \"\"\"WebSocket media handler for de(serializing) JSON to/from TEXT payloads.\n\n This handler uses Python's standard :py:mod:`json` library by default, but\n can be easily configured to use any of a number of third-party JSON\n libraries, depending on your needs. For example, you can often\n realize a significant performance boost under CPython by using an\n alternative library. 
Good options in this respect include `orjson`,\n `python-rapidjson`, and `mujson`.\n\n Note:\n If you are deploying to PyPy, we recommend sticking with the standard\n library's JSON implementation, since it will be faster in most cases\n as compared to a third-party library.\n\n Overriding the default JSON implementation is simply a matter of specifying\n the desired ``dumps`` and ``loads`` functions::\n\n import falcon\n from falcon import media\n\n import rapidjson\n\n json_handler = media.JSONHandlerWS(\n dumps=rapidjson.dumps,\n loads=rapidjson.loads,\n )\n\n app = falcon.asgi.App()\n app.ws_options.media_handlers[falcon.WebSocketPayloadType.TEXT] = json_handler\n\n By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.\n If you override the ``dumps`` function, you will need to explicitly set\n ``ensure_ascii`` to ``False`` in order to enable the serialization of\n Unicode characters to UTF-8. This is easily done by using\n :any:`functools.partial` to apply the desired keyword argument. In fact, you\n can use this same technique to customize any option supported by the\n ``dumps`` and ``loads`` functions::\n\n from functools import partial\n\n from falcon import media\n import rapidjson\n\n json_handler = media.JSONHandlerWS(\n dumps=partial(\n rapidjson.dumps,\n ensure_ascii=False, sort_keys=True\n ),\n )\n\n Keyword Arguments:\n dumps (func): Function to use when serializing JSON.\n loads (func): Function to use when deserializing JSON.\n \"\"\"\n\n __slots__ = ['dumps', 'loads']\n\n def __init__(self, dumps=None, loads=None):\n self._dumps = dumps or partial(json.dumps, ensure_ascii=False)\n self._loads = loads or json.loads\n\n def serialize(self, media: object) -> str:\n return self._dumps(media)\n\n def deserialize(self, payload: str) -> object:\n return self._loads(payload)\n\n\nhttp_error._DEFAULT_JSON_HANDLER = _DEFAULT_JSON_HANDLER = JSONHandler() # type: ignore\n", "path": "falcon/media/json.py"}], "after_files": [{"content": "from functools import partial\nimport json\n\nfrom falcon import errors\nfrom falcon import http_error\nfrom falcon.media.base import BaseHandler\nfrom falcon.media.base import TextBaseHandlerWS\n\n\nclass JSONHandler(BaseHandler):\n \"\"\"JSON media handler.\n\n This handler uses Python's standard :py:mod:`json` library by default, but\n can be easily configured to use any of a number of third-party JSON\n libraries, depending on your needs. For example, you can often\n realize a significant performance boost under CPython by using an\n alternative library. Good options in this respect include `orjson`,\n `python-rapidjson`, and `mujson`.\n\n This handler will raise a :class:`falcon.MediaNotFoundError` when attempting\n to parse an empty body, or a :class:`falcon.MediaMalformedError`\n if an error happens while parsing the body.\n\n Note:\n If you are deploying to PyPy, we recommend sticking with the standard\n library's JSON implementation, since it will be faster in most cases\n as compared to a third-party library.\n\n .. rubric:: Custom JSON library\n\n You can replace the default JSON handler by using a custom JSON library\n (see also: :ref:`custom_media_handlers`). 
Overriding the default JSON\n implementation is simply a matter of specifying the desired ``dumps`` and\n ``loads`` functions::\n\n import falcon\n from falcon import media\n\n import rapidjson\n\n json_handler = media.JSONHandler(\n dumps=rapidjson.dumps,\n loads=rapidjson.loads,\n )\n extra_handlers = {\n 'application/json': json_handler,\n }\n\n app = falcon.App()\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n .. rubric:: Custom serialization parameters\n\n Even if you decide to stick with the stdlib's :any:`json.dumps` and\n :any:`json.loads`, you can wrap them using :any:`functools.partial` to\n provide custom serialization or deserialization parameters supported by the\n ``dumps`` and ``loads`` functions, respectively\n (see also: :ref:`prettifying-json-responses`)::\n\n import falcon\n from falcon import media\n\n from functools import partial\n\n json_handler = media.JSONHandler(\n dumps=partial(\n json.dumps,\n default=str,\n sort_keys=True,\n ),\n )\n extra_handlers = {\n 'application/json': json_handler,\n }\n\n app = falcon.App()\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.\n If you override the ``dumps`` function, you will need to explicitly set\n ``ensure_ascii`` to ``False`` in order to enable the serialization of\n Unicode characters to UTF-8. This is easily done by using\n :any:`functools.partial` to apply the desired keyword argument. As also\n demonstrated in the previous paragraph, you can use this same technique to\n customize any option supported by the ``dumps`` and ``loads`` functions::\n\n from functools import partial\n\n from falcon import media\n import rapidjson\n\n json_handler = media.JSONHandler(\n dumps=partial(\n rapidjson.dumps,\n ensure_ascii=False, sort_keys=True\n ),\n )\n\n .. _custom-media-json-encoder:\n\n .. rubric:: Custom JSON encoder\n\n You can also override the default :class:`~json.JSONEncoder` by using a\n custom Encoder and updating the media handlers for ``application/json``\n type to use that::\n\n import json\n from datetime import datetime\n from functools import partial\n\n import falcon\n from falcon import media\n\n class DatetimeEncoder(json.JSONEncoder):\n \\\"\\\"\\\"Json Encoder that supports datetime objects.\\\"\\\"\\\"\n\n def default(self, obj):\n if isinstance(obj, datetime):\n return obj.isoformat()\n return super().default(obj)\n\n app = falcon.App()\n\n json_handler = media.JSONHandler(\n dumps=partial(json.dumps, cls=DatetimeEncoder),\n )\n extra_handlers = {\n 'application/json': json_handler,\n }\n\n app.req_options.media_handlers.update(extra_handlers)\n app.resp_options.media_handlers.update(extra_handlers)\n\n .. note:: When testing an application employing a custom JSON encoder, bear\n in mind that :class:`~.testing.TestClient` is decoupled from the app,\n and it simulates requests as if they were performed by a third-party\n client (just sans network). Therefore, passing the **json** parameter\n to :ref:`simulate_* <testing_standalone_methods>` methods will\n effectively use the stdlib's :func:`json.dumps`. 
If you want to\n serialize custom objects for testing, you will need to dump them into a\n string yourself, and pass it using the **body** parameter instead\n (accompanied by the ``application/json`` content type header).\n\n Keyword Arguments:\n dumps (func): Function to use when serializing JSON responses.\n loads (func): Function to use when deserializing JSON requests.\n \"\"\"\n\n def __init__(self, dumps=None, loads=None):\n self._dumps = dumps or partial(json.dumps, ensure_ascii=False)\n self._loads = loads or json.loads\n\n # PERF(kgriffs): Test dumps once up front so we can set the\n # proper serialize implementation.\n result = self._dumps({'message': 'Hello World'})\n if isinstance(result, str):\n self.serialize = self._serialize_s\n self.serialize_async = self._serialize_async_s\n else:\n self.serialize = self._serialize_b\n self.serialize_async = self._serialize_async_b\n\n # NOTE(kgriffs): To be safe, only enable the optimized protocol when\n # not subclassed.\n if type(self) is JSONHandler:\n self._serialize_sync = self.serialize\n self._deserialize_sync = self._deserialize\n\n def _deserialize(self, data):\n if not data:\n raise errors.MediaNotFoundError('JSON')\n try:\n return self._loads(data.decode())\n except ValueError as err:\n raise errors.MediaMalformedError('JSON') from err\n\n def deserialize(self, stream, content_type, content_length):\n return self._deserialize(stream.read())\n\n async def deserialize_async(self, stream, content_type, content_length):\n return self._deserialize(await stream.read())\n\n # NOTE(kgriffs): Make content_type a kwarg to support the\n # Request.render_body() shortcut optimization.\n def _serialize_s(self, media, content_type=None) -> bytes:\n return self._dumps(media).encode()\n\n async def _serialize_async_s(self, media, content_type) -> bytes:\n return self._dumps(media).encode()\n\n def _serialize_b(self, media, content_type) -> bytes:\n return self._dumps(media)\n\n async def _serialize_async_b(self, media, content_type) -> bytes:\n return self._dumps(media)\n\n\nclass JSONHandlerWS(TextBaseHandlerWS):\n \"\"\"WebSocket media handler for de(serializing) JSON to/from TEXT payloads.\n\n This handler uses Python's standard :py:mod:`json` library by default, but\n can be easily configured to use any of a number of third-party JSON\n libraries, depending on your needs. For example, you can often\n realize a significant performance boost under CPython by using an\n alternative library. Good options in this respect include `orjson`,\n `python-rapidjson`, and `mujson`.\n\n Note:\n If you are deploying to PyPy, we recommend sticking with the standard\n library's JSON implementation, since it will be faster in most cases\n as compared to a third-party library.\n\n Overriding the default JSON implementation is simply a matter of specifying\n the desired ``dumps`` and ``loads`` functions::\n\n import falcon\n from falcon import media\n\n import rapidjson\n\n json_handler = media.JSONHandlerWS(\n dumps=rapidjson.dumps,\n loads=rapidjson.loads,\n )\n\n app = falcon.asgi.App()\n app.ws_options.media_handlers[falcon.WebSocketPayloadType.TEXT] = json_handler\n\n By default, ``ensure_ascii`` is passed to the ``json.dumps`` function.\n If you override the ``dumps`` function, you will need to explicitly set\n ``ensure_ascii`` to ``False`` in order to enable the serialization of\n Unicode characters to UTF-8. This is easily done by using\n :any:`functools.partial` to apply the desired keyword argument. 
In fact, you\n can use this same technique to customize any option supported by the\n ``dumps`` and ``loads`` functions::\n\n from functools import partial\n\n from falcon import media\n import rapidjson\n\n json_handler = media.JSONHandlerWS(\n dumps=partial(\n rapidjson.dumps,\n ensure_ascii=False, sort_keys=True\n ),\n )\n\n Keyword Arguments:\n dumps (func): Function to use when serializing JSON.\n loads (func): Function to use when deserializing JSON.\n \"\"\"\n\n __slots__ = ['dumps', 'loads']\n\n def __init__(self, dumps=None, loads=None):\n self._dumps = dumps or partial(json.dumps, ensure_ascii=False)\n self._loads = loads or json.loads\n\n def serialize(self, media: object) -> str:\n return self._dumps(media)\n\n def deserialize(self, payload: str) -> object:\n return self._loads(payload)\n\n\nhttp_error._DEFAULT_JSON_HANDLER = _DEFAULT_JSON_HANDLER = JSONHandler() # type: ignore\n", "path": "falcon/media/json.py"}]} | 3,510 | 243 |
gh_patches_debug_450 | rasdani/github-patches | git_diff | spyder-ide__spyder-6089 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spyder not starting in macOS because pyopengl is present
I updated python and spyder using "conda update spyder" and "conda update python" respectively on MacOSX. Is this a python error or a spyder error?
Please find attached the conda list output listing the versions.
Thanks for your support.
--------
Traceback (most recent call last):
File "/Users/Nagraj/anaconda3/bin/spyder", line 7, in
[conda_list.txt](https://github.com/spyder-ide/spyder/files/1590875/conda_list.txt)
from spyder.app.start import main
File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/spyder/app/start.py", line 19, in
from OpenGL import GL
File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/GL/__init__.py", line 3, in
from OpenGL import error as _error
File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/error.py", line 12, in
from OpenGL import platform, _configflags
File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/platform/__init__.py", line 35, in
_load()
File "/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/platform/__init__.py", line 29, in _load
plugin = plugin_class()
TypeError: 'NoneType' object is not callable
--- END ISSUE ---
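Editorial aside (not part of the report above): the traceback shows `from OpenGL import GL` failing with a `TypeError` raised inside pyopengl's own platform loader, which a narrow `except ImportError` cannot catch — this is what the golden diff below addresses by broadening the clause. A minimal, hypothetical sketch of that failure mode, using a stand-in function for the broken import:
```python
# Hypothetical stand-in for "from OpenGL import GL" blowing up inside
# pyopengl's platform loader rather than raising ImportError.
def flaky_import():
    raise TypeError("'NoneType' object is not callable")

try:
    flaky_import()
except ImportError:
    print("never reached: the failure is not an ImportError")
except Exception as exc:
    # a broader catch (as the eventual patch uses) keeps startup alive
    print("handled:", exc)
```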
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spyder/app/start.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Std imports
4 import os
5 import os.path as osp
6 import random
7 import socket
8 import sys
9 import time
10
11 # To prevent a race condition with ZMQ
12 # See issue 5324
13 import zmq
14
15 # This import is needed to fix errors with OpenGL when installed using pip
16 # See issue 3332
17 try:
18 from OpenGL import GL
19 except ImportError:
20 # pyopengl is not present when installed using conda
21 pass
22
23 # Local imports
24 from spyder.app.cli_options import get_options
25 from spyder.config.base import get_conf_path, running_in_mac_app
26 from spyder.config.main import CONF
27 from spyder.utils.external import lockfile
28 from spyder.py3compat import is_unicode
29
30
31 def send_args_to_spyder(args):
32 """
33 Simple socket client used to send the args passed to the Spyder
34 executable to an already running instance.
35
36 Args can be Python scripts or files with these extensions: .spydata, .mat,
37 .npy, or .h5, which can be imported by the Variable Explorer.
38 """
39 port = CONF.get('main', 'open_files_port')
40
41 # Wait ~50 secs for the server to be up
42 # Taken from http://stackoverflow.com/a/4766598/438386
43 for _x in range(200):
44 try:
45 for arg in args:
46 client = socket.socket(socket.AF_INET, socket.SOCK_STREAM,
47 socket.IPPROTO_TCP)
48 client.connect(("127.0.0.1", port))
49 if is_unicode(arg):
50 arg = arg.encode('utf-8')
51 client.send(osp.abspath(arg))
52 client.close()
53 except socket.error:
54 time.sleep(0.25)
55 continue
56 break
57
58
59 def main():
60 """
61 Start Spyder application.
62
63 If single instance mode is turned on (default behavior) and an instance of
64 Spyder is already running, this will just parse and send command line
65 options to the application.
66 """
67 # Parse command line options
68 options, args = get_options()
69
70 # Store variable to be used in self.restart (restart spyder instance)
71 os.environ['SPYDER_ARGS'] = str(sys.argv[1:])
72
73 #==========================================================================
74 # Proper high DPI scaling is available in Qt >= 5.6.0. This attibute must
75 # be set before creating the application.
76 #==========================================================================
77 if CONF.get('main', 'high_dpi_custom_scale_factor'):
78 factors = str(CONF.get('main', 'high_dpi_custom_scale_factors'))
79 f = list(filter(None, factors.split(';')))
80 if len(f) == 1:
81 os.environ['QT_SCALE_FACTOR'] = f[0]
82 else:
83 os.environ['QT_SCREEN_SCALE_FACTORS'] = factors
84 else:
85 os.environ['QT_SCALE_FACTOR'] = ''
86 os.environ['QT_SCREEN_SCALE_FACTORS'] = ''
87
88 # Prevent Spyder from crashing in macOS if locale is not defined
89 if sys.platform == 'darwin':
90 LANG = os.environ.get('LANG')
91 LC_ALL = os.environ.get('LC_ALL')
92 if bool(LANG) and not bool(LC_ALL):
93 LC_ALL = LANG
94 elif not bool(LANG) and bool(LC_ALL):
95 LANG = LC_ALL
96 else:
97 LANG = LC_ALL = 'en_US.UTF-8'
98
99 os.environ['LANG'] = LANG
100 os.environ['LC_ALL'] = LC_ALL
101
102 if CONF.get('main', 'single_instance') and not options.new_instance \
103 and not options.reset_config_files and not running_in_mac_app():
104 # Minimal delay (0.1-0.2 secs) to avoid that several
105 # instances started at the same time step in their
106 # own foots while trying to create the lock file
107 time.sleep(random.randrange(1000, 2000, 90)/10000.)
108
109 # Lock file creation
110 lock_file = get_conf_path('spyder.lock')
111 lock = lockfile.FilesystemLock(lock_file)
112
113 # Try to lock spyder.lock. If it's *possible* to do it, then
114 # there is no previous instance running and we can start a
115 # new one. If *not*, then there is an instance already
116 # running, which is locking that file
117 try:
118 lock_created = lock.lock()
119 except:
120 # If locking fails because of errors in the lockfile
121 # module, try to remove a possibly stale spyder.lock.
122 # This is reported to solve all problems with
123 # lockfile (See issue 2363)
124 try:
125 if os.name == 'nt':
126 if osp.isdir(lock_file):
127 import shutil
128 shutil.rmtree(lock_file, ignore_errors=True)
129 else:
130 if osp.islink(lock_file):
131 os.unlink(lock_file)
132 except:
133 pass
134
135 # Then start Spyder as usual and *don't* continue
136 # executing this script because it doesn't make
137 # sense
138 from spyder.app import mainwindow
139 mainwindow.main()
140 return
141
142 if lock_created:
143 # Start a new instance
144 from spyder.app import mainwindow
145 mainwindow.main()
146 else:
147 # Pass args to Spyder or print an informative
148 # message
149 if args:
150 send_args_to_spyder(args)
151 else:
152 print("Spyder is already running. If you want to open a new \n"
153 "instance, please pass to it the --new-instance option")
154 else:
155 from spyder.app import mainwindow
156 mainwindow.main()
157
158
159 if __name__ == "__main__":
160 main()
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spyder/app/start.py b/spyder/app/start.py
--- a/spyder/app/start.py
+++ b/spyder/app/start.py
@@ -16,8 +16,7 @@
# See issue 3332
try:
from OpenGL import GL
-except ImportError:
- # pyopengl is not present when installed using conda
+except:
pass
# Local imports
| {"golden_diff": "diff --git a/spyder/app/start.py b/spyder/app/start.py\n--- a/spyder/app/start.py\n+++ b/spyder/app/start.py\n@@ -16,8 +16,7 @@\n # See issue 3332\r\n try:\r\n from OpenGL import GL\r\n-except ImportError:\r\n- # pyopengl is not present when installed using conda\r\n+except:\r\n pass\r\n \r\n # Local imports\n", "issue": "Spyder not starting in macOS because pyopengl is present\n\r\n\r\nI updated python and spyder using \"conda update spyder\" and \"conda update python\" respectively on MacOSX. Is this a python error or a spyder error? \r\n\r\nPlease find attached the conda list output listing the versions.\r\n\r\nThanks for your support. \r\n--------\r\nTraceback (most recent call last):\r\nFile \"/Users/Nagraj/anaconda3/bin/spyder\", line 7, in \r\n[conda_list.txt](https://github.com/spyder-ide/spyder/files/1590875/conda_list.txt)\r\n\r\nfrom spyder.app.start import main\r\nFile \"/Users/Nagraj/anaconda3/lib/python3.6/site-packages/spyder/app/start.py\", line 19, in \r\nfrom OpenGL import GL\r\nFile \"/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/GL/__init__.py\", line 3, in \r\nfrom OpenGL import error as _error\r\nFile \"/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/error.py\", line 12, in \r\nfrom OpenGL import platform, _configflags\r\nFile \"/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/platform/__init__.py\", line 35, in \r\n_load()\r\nFile \"/Users/Nagraj/anaconda3/lib/python3.6/site-packages/OpenGL/platform/__init__.py\", line 29, in _load\r\nplugin = plugin_class()\r\nTypeError: 'NoneType' object is not callable\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\r\n\r\n# Std imports\r\nimport os\r\nimport os.path as osp\r\nimport random\r\nimport socket\r\nimport sys\r\nimport time\r\n\r\n# To prevent a race condition with ZMQ\r\n# See issue 5324\r\nimport zmq\r\n\r\n# This import is needed to fix errors with OpenGL when installed using pip\r\n# See issue 3332\r\ntry:\r\n from OpenGL import GL\r\nexcept ImportError:\r\n # pyopengl is not present when installed using conda\r\n pass\r\n\r\n# Local imports\r\nfrom spyder.app.cli_options import get_options\r\nfrom spyder.config.base import get_conf_path, running_in_mac_app\r\nfrom spyder.config.main import CONF\r\nfrom spyder.utils.external import lockfile\r\nfrom spyder.py3compat import is_unicode\r\n\r\n\r\ndef send_args_to_spyder(args):\r\n \"\"\"\r\n Simple socket client used to send the args passed to the Spyder \r\n executable to an already running instance.\r\n\r\n Args can be Python scripts or files with these extensions: .spydata, .mat,\r\n .npy, or .h5, which can be imported by the Variable Explorer.\r\n \"\"\"\r\n port = CONF.get('main', 'open_files_port')\r\n\r\n # Wait ~50 secs for the server to be up\r\n # Taken from http://stackoverflow.com/a/4766598/438386\r\n for _x in range(200):\r\n try:\r\n for arg in args:\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM,\r\n socket.IPPROTO_TCP)\r\n client.connect((\"127.0.0.1\", port))\r\n if is_unicode(arg):\r\n arg = arg.encode('utf-8')\r\n client.send(osp.abspath(arg))\r\n client.close()\r\n except socket.error:\r\n time.sleep(0.25)\r\n continue\r\n break\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Start Spyder application.\r\n\r\n If single instance mode is turned on (default behavior) and an instance of\r\n Spyder is already running, this will just parse and send command line\r\n options to the application.\r\n \"\"\"\r\n # Parse command line options\r\n options, args = get_options()\r\n\r\n # 
Store variable to be used in self.restart (restart spyder instance)\r\n os.environ['SPYDER_ARGS'] = str(sys.argv[1:])\r\n\r\n #==========================================================================\r\n # Proper high DPI scaling is available in Qt >= 5.6.0. This attibute must\r\n # be set before creating the application.\r\n #==========================================================================\r\n if CONF.get('main', 'high_dpi_custom_scale_factor'):\r\n factors = str(CONF.get('main', 'high_dpi_custom_scale_factors'))\r\n f = list(filter(None, factors.split(';')))\r\n if len(f) == 1:\r\n os.environ['QT_SCALE_FACTOR'] = f[0]\r\n else:\r\n os.environ['QT_SCREEN_SCALE_FACTORS'] = factors\r\n else:\r\n os.environ['QT_SCALE_FACTOR'] = ''\r\n os.environ['QT_SCREEN_SCALE_FACTORS'] = ''\r\n\r\n # Prevent Spyder from crashing in macOS if locale is not defined\r\n if sys.platform == 'darwin':\r\n LANG = os.environ.get('LANG')\r\n LC_ALL = os.environ.get('LC_ALL')\r\n if bool(LANG) and not bool(LC_ALL):\r\n LC_ALL = LANG\r\n elif not bool(LANG) and bool(LC_ALL):\r\n LANG = LC_ALL\r\n else:\r\n LANG = LC_ALL = 'en_US.UTF-8'\r\n\r\n os.environ['LANG'] = LANG\r\n os.environ['LC_ALL'] = LC_ALL\r\n\r\n if CONF.get('main', 'single_instance') and not options.new_instance \\\r\n and not options.reset_config_files and not running_in_mac_app():\r\n # Minimal delay (0.1-0.2 secs) to avoid that several\r\n # instances started at the same time step in their\r\n # own foots while trying to create the lock file\r\n time.sleep(random.randrange(1000, 2000, 90)/10000.)\r\n\r\n # Lock file creation\r\n lock_file = get_conf_path('spyder.lock')\r\n lock = lockfile.FilesystemLock(lock_file)\r\n\r\n # Try to lock spyder.lock. If it's *possible* to do it, then\r\n # there is no previous instance running and we can start a\r\n # new one. If *not*, then there is an instance already\r\n # running, which is locking that file\r\n try:\r\n lock_created = lock.lock()\r\n except:\r\n # If locking fails because of errors in the lockfile\r\n # module, try to remove a possibly stale spyder.lock.\r\n # This is reported to solve all problems with\r\n # lockfile (See issue 2363)\r\n try:\r\n if os.name == 'nt':\r\n if osp.isdir(lock_file):\r\n import shutil\r\n shutil.rmtree(lock_file, ignore_errors=True)\r\n else:\r\n if osp.islink(lock_file):\r\n os.unlink(lock_file)\r\n except:\r\n pass\r\n\r\n # Then start Spyder as usual and *don't* continue\r\n # executing this script because it doesn't make\r\n # sense\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n return\r\n\r\n if lock_created:\r\n # Start a new instance\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n else:\r\n # Pass args to Spyder or print an informative\r\n # message\r\n if args:\r\n send_args_to_spyder(args)\r\n else:\r\n print(\"Spyder is already running. 
If you want to open a new \\n\"\r\n \"instance, please pass to it the --new-instance option\")\r\n else:\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "path": "spyder/app/start.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\r\n\r\n# Std imports\r\nimport os\r\nimport os.path as osp\r\nimport random\r\nimport socket\r\nimport sys\r\nimport time\r\n\r\n# To prevent a race condition with ZMQ\r\n# See issue 5324\r\nimport zmq\r\n\r\n# This import is needed to fix errors with OpenGL when installed using pip\r\n# See issue 3332\r\ntry:\r\n from OpenGL import GL\r\nexcept:\r\n pass\r\n\r\n# Local imports\r\nfrom spyder.app.cli_options import get_options\r\nfrom spyder.config.base import get_conf_path, running_in_mac_app\r\nfrom spyder.config.main import CONF\r\nfrom spyder.utils.external import lockfile\r\nfrom spyder.py3compat import is_unicode\r\n\r\n\r\ndef send_args_to_spyder(args):\r\n \"\"\"\r\n Simple socket client used to send the args passed to the Spyder \r\n executable to an already running instance.\r\n\r\n Args can be Python scripts or files with these extensions: .spydata, .mat,\r\n .npy, or .h5, which can be imported by the Variable Explorer.\r\n \"\"\"\r\n port = CONF.get('main', 'open_files_port')\r\n\r\n # Wait ~50 secs for the server to be up\r\n # Taken from http://stackoverflow.com/a/4766598/438386\r\n for _x in range(200):\r\n try:\r\n for arg in args:\r\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM,\r\n socket.IPPROTO_TCP)\r\n client.connect((\"127.0.0.1\", port))\r\n if is_unicode(arg):\r\n arg = arg.encode('utf-8')\r\n client.send(osp.abspath(arg))\r\n client.close()\r\n except socket.error:\r\n time.sleep(0.25)\r\n continue\r\n break\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n Start Spyder application.\r\n\r\n If single instance mode is turned on (default behavior) and an instance of\r\n Spyder is already running, this will just parse and send command line\r\n options to the application.\r\n \"\"\"\r\n # Parse command line options\r\n options, args = get_options()\r\n\r\n # Store variable to be used in self.restart (restart spyder instance)\r\n os.environ['SPYDER_ARGS'] = str(sys.argv[1:])\r\n\r\n #==========================================================================\r\n # Proper high DPI scaling is available in Qt >= 5.6.0. 
This attibute must\r\n # be set before creating the application.\r\n #==========================================================================\r\n if CONF.get('main', 'high_dpi_custom_scale_factor'):\r\n factors = str(CONF.get('main', 'high_dpi_custom_scale_factors'))\r\n f = list(filter(None, factors.split(';')))\r\n if len(f) == 1:\r\n os.environ['QT_SCALE_FACTOR'] = f[0]\r\n else:\r\n os.environ['QT_SCREEN_SCALE_FACTORS'] = factors\r\n else:\r\n os.environ['QT_SCALE_FACTOR'] = ''\r\n os.environ['QT_SCREEN_SCALE_FACTORS'] = ''\r\n\r\n # Prevent Spyder from crashing in macOS if locale is not defined\r\n if sys.platform == 'darwin':\r\n LANG = os.environ.get('LANG')\r\n LC_ALL = os.environ.get('LC_ALL')\r\n if bool(LANG) and not bool(LC_ALL):\r\n LC_ALL = LANG\r\n elif not bool(LANG) and bool(LC_ALL):\r\n LANG = LC_ALL\r\n else:\r\n LANG = LC_ALL = 'en_US.UTF-8'\r\n\r\n os.environ['LANG'] = LANG\r\n os.environ['LC_ALL'] = LC_ALL\r\n\r\n if CONF.get('main', 'single_instance') and not options.new_instance \\\r\n and not options.reset_config_files and not running_in_mac_app():\r\n # Minimal delay (0.1-0.2 secs) to avoid that several\r\n # instances started at the same time step in their\r\n # own foots while trying to create the lock file\r\n time.sleep(random.randrange(1000, 2000, 90)/10000.)\r\n\r\n # Lock file creation\r\n lock_file = get_conf_path('spyder.lock')\r\n lock = lockfile.FilesystemLock(lock_file)\r\n\r\n # Try to lock spyder.lock. If it's *possible* to do it, then\r\n # there is no previous instance running and we can start a\r\n # new one. If *not*, then there is an instance already\r\n # running, which is locking that file\r\n try:\r\n lock_created = lock.lock()\r\n except:\r\n # If locking fails because of errors in the lockfile\r\n # module, try to remove a possibly stale spyder.lock.\r\n # This is reported to solve all problems with\r\n # lockfile (See issue 2363)\r\n try:\r\n if os.name == 'nt':\r\n if osp.isdir(lock_file):\r\n import shutil\r\n shutil.rmtree(lock_file, ignore_errors=True)\r\n else:\r\n if osp.islink(lock_file):\r\n os.unlink(lock_file)\r\n except:\r\n pass\r\n\r\n # Then start Spyder as usual and *don't* continue\r\n # executing this script because it doesn't make\r\n # sense\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n return\r\n\r\n if lock_created:\r\n # Start a new instance\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n else:\r\n # Pass args to Spyder or print an informative\r\n # message\r\n if args:\r\n send_args_to_spyder(args)\r\n else:\r\n print(\"Spyder is already running. If you want to open a new \\n\"\r\n \"instance, please pass to it the --new-instance option\")\r\n else:\r\n from spyder.app import mainwindow\r\n mainwindow.main()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n", "path": "spyder/app/start.py"}]} | 2,235 | 95 |
gh_patches_debug_18013 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-446 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
romanized decorator mutates ROMANIZATION_DICT
After `@romanized` is used, ROMANIZATION_DICT gets updated and every module importing it will get this mutated ROMANIZATION_DICT.
The snippet below should reproduce the problem.
```
from mimesis import decorators, data

if __name__ == '__main__':
    print('ROMANIZATION_DICT: before')
    print(data.ROMANIZATION_DICT)

    @decorators.romanized('ru')
    def russian_name(): return 'Петр Петрович'
    # next line is where ROMANIZATION_DICT mutates
    russian_name()

    print('ROMANIZATION_DICT: after')
    print(data.ROMANIZATION_DICT)
```
Problem is here:
https://github.com/lk-geimfari/mimesis/blob/master/mimesis/decorators.py#L29
--- END ISSUE ---
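As a general illustration of the aliasing problem described above (an editorial sketch with made-up names, not mimesis code): calling `update()` on a dict fetched from module-level data mutates that shared data in place, whereas starting from a fresh local dict — as the golden diff below does — leaves it untouched:
```python
# SHARED stands in for data.ROMANIZATION_DICT; the names are hypothetical.
SHARED = {"ru": {"п": "p"}}

def leaky(locale):
    alphabet = SHARED[locale]        # alias of the shared dict
    alphabet.update({"extra": "x"})  # mutates SHARED[locale] in place
    return alphabet

def safe(locale):
    alphabet = {"extra": "x"}        # fresh dict owned by this call
    alphabet.update(SHARED[locale])  # shared data is only read
    return alphabet

leaky("ru")
print("extra" in SHARED["ru"])   # True  -> shared state polluted
SHARED["ru"] = {"п": "p"}        # reset
safe("ru")
print("extra" in SHARED["ru"])   # False -> shared state intact
```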
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mimesis/decorators.py`
Content:
```
1 """Decorators for the public API and for internal purpose."""
2
3 import functools
4 from string import ascii_letters as letters
5 from string import digits, punctuation
6 from typing import Callable
7
8 from mimesis import data
9 from mimesis.exceptions import UnsupportedLocale
10
11
12 def romanized(locale: str = '') -> Callable:
13 """Romanize the Cyrillic text.
14
15 Transliterate the Cyrillic language from the Cyrillic
16 script into the Latin alphabet.
17
18 .. note:: At this moment it works only for `ru`, `uk`, `kk`.
19
20 :param locale: Locale code.
21 :return: Latinized text.
22 """
23 def romanized_deco(func):
24 @functools.wraps(func)
25 def wrapper(*args, **kwargs):
26 try:
27 alphabet = data.ROMANIZATION_DICT[locale]
28 # Add common cyrillic common letters
29 alphabet.update(data.COMMON_LETTERS)
30 # String can contain ascii symbols, digits and
31 # punctuation symbols.
32 alphabet.update({s: s for s in
33 letters + digits + punctuation})
34 except KeyError:
35 raise UnsupportedLocale(locale)
36 result = func(*args, **kwargs)
37 txt = ''.join([alphabet[i] for i in result if i in alphabet])
38 return txt
39
40 return wrapper
41
42 return romanized_deco
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mimesis/decorators.py b/mimesis/decorators.py
--- a/mimesis/decorators.py
+++ b/mimesis/decorators.py
@@ -24,13 +24,13 @@
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
- alphabet = data.ROMANIZATION_DICT[locale]
- # Add common cyrillic common letters
- alphabet.update(data.COMMON_LETTERS)
# String can contain ascii symbols, digits and
# punctuation symbols.
- alphabet.update({s: s for s in
- letters + digits + punctuation})
+ alphabet = {s: s for s in
+ letters + digits + punctuation}
+ alphabet.update(data.ROMANIZATION_DICT[locale])
+ # Add common cyrillic letters
+ alphabet.update(data.COMMON_LETTERS)
except KeyError:
raise UnsupportedLocale(locale)
result = func(*args, **kwargs)
| {"golden_diff": "diff --git a/mimesis/decorators.py b/mimesis/decorators.py\n--- a/mimesis/decorators.py\n+++ b/mimesis/decorators.py\n@@ -24,13 +24,13 @@\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n- alphabet = data.ROMANIZATION_DICT[locale]\n- # Add common cyrillic common letters\n- alphabet.update(data.COMMON_LETTERS)\n # String can contain ascii symbols, digits and\n # punctuation symbols.\n- alphabet.update({s: s for s in\n- letters + digits + punctuation})\n+ alphabet = {s: s for s in\n+ letters + digits + punctuation}\n+ alphabet.update(data.ROMANIZATION_DICT[locale])\n+ # Add common cyrillic letters\n+ alphabet.update(data.COMMON_LETTERS)\n except KeyError:\n raise UnsupportedLocale(locale)\n result = func(*args, **kwargs)\n", "issue": "romanized decorator mutates ROMANIZATION_DICT\nAfter `@romanized` is used, ROMANIZATION_DICT gets updated and every module importing it will get this mutated ROMANIZATION_DICT.\r\nSnippet below should reproduce problem.\r\n```\r\nfrom mimesis import decorators, data\r\n\r\n\r\nif __name__ == '__main__':\r\n print('ROMANIZATION_DICT: before')\r\n print(data.ROMANIZATION_DICT)\r\n\r\n @decorators.romanized('ru')\r\n def russian_name(): return '\u041f\u0435\u0442\u0440 \u041f\u0435\u0442\u0440\u043e\u0432\u0438\u0447'\r\n # next line is where ROMANIZATION_DICT mutates\r\n russian_name()\r\n\r\n print('ROMANIZATION_DICT: after')\r\n print(data.ROMANIZATION_DICT)\r\n```\r\nProblem is here:\r\nhttps://github.com/lk-geimfari/mimesis/blob/master/mimesis/decorators.py#L29\r\n\n", "before_files": [{"content": "\"\"\"Decorators for the public API and for internal purpose.\"\"\"\n\nimport functools\nfrom string import ascii_letters as letters\nfrom string import digits, punctuation\nfrom typing import Callable\n\nfrom mimesis import data\nfrom mimesis.exceptions import UnsupportedLocale\n\n\ndef romanized(locale: str = '') -> Callable:\n \"\"\"Romanize the Cyrillic text.\n\n Transliterate the Cyrillic language from the Cyrillic\n script into the Latin alphabet.\n\n .. note:: At this moment it works only for `ru`, `uk`, `kk`.\n\n :param locale: Locale code.\n :return: Latinized text.\n \"\"\"\n def romanized_deco(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n alphabet = data.ROMANIZATION_DICT[locale]\n # Add common cyrillic common letters\n alphabet.update(data.COMMON_LETTERS)\n # String can contain ascii symbols, digits and\n # punctuation symbols.\n alphabet.update({s: s for s in\n letters + digits + punctuation})\n except KeyError:\n raise UnsupportedLocale(locale)\n result = func(*args, **kwargs)\n txt = ''.join([alphabet[i] for i in result if i in alphabet])\n return txt\n\n return wrapper\n\n return romanized_deco\n", "path": "mimesis/decorators.py"}], "after_files": [{"content": "\"\"\"Decorators for the public API and for internal purpose.\"\"\"\n\nimport functools\nfrom string import ascii_letters as letters\nfrom string import digits, punctuation\nfrom typing import Callable\n\nfrom mimesis import data\nfrom mimesis.exceptions import UnsupportedLocale\n\n\ndef romanized(locale: str = '') -> Callable:\n \"\"\"Romanize the Cyrillic text.\n\n Transliterate the Cyrillic language from the Cyrillic\n script into the Latin alphabet.\n\n .. 
note:: At this moment it works only for `ru`, `uk`, `kk`.\n\n :param locale: Locale code.\n :return: Latinized text.\n \"\"\"\n def romanized_deco(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n try:\n # String can contain ascii symbols, digits and\n # punctuation symbols.\n alphabet = {s: s for s in\n letters + digits + punctuation}\n alphabet.update(data.ROMANIZATION_DICT[locale])\n # Add common cyrillic letters\n alphabet.update(data.COMMON_LETTERS)\n except KeyError:\n raise UnsupportedLocale(locale)\n result = func(*args, **kwargs)\n txt = ''.join([alphabet[i] for i in result if i in alphabet])\n return txt\n\n return wrapper\n\n return romanized_deco\n", "path": "mimesis/decorators.py"}]} | 800 | 220 |
gh_patches_debug_26944 | rasdani/github-patches | git_diff | Qiskit__qiskit-12321 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add insert_barrier argument to UnitaryOverlap
### What should we add?
This argument would insert a barrier between the two unitaries. This is useful if you want to prevent circuit optimization between the two parts.
--- END ISSUE ---
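A brief usage sketch of the requested keyword (the accompanying diff below adds it as `insert_barrier: bool = False`); treat the snippet as illustrative rather than official documentation:
```python
# Assumes Qiskit with EfficientSU2/UnitaryOverlap available and the new
# keyword accepted by the constructor.
import numpy as np
from qiskit.circuit.library import EfficientSU2, UnitaryOverlap

ansatz = EfficientSU2(2, reps=1)
u1 = ansatz.assign_parameters(np.random.random(ansatz.num_parameters))
u2 = ansatz.assign_parameters(np.random.random(ansatz.num_parameters))

# The barrier stops the transpiler from optimizing across the U2^dag U1 seam.
overlap = UnitaryOverlap(u1, u2, insert_barrier=True)
print(overlap.draw())
```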
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/circuit/library/overlap.py`
Content:
```
1 # This code is part of Qiskit.
2 #
3 # (C) Copyright IBM 2023.
4 #
5 # This code is licensed under the Apache License, Version 2.0. You may
6 # obtain a copy of this license in the LICENSE.txt file in the root directory
7 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
8 #
9 # Any modifications or derivative works of this code must retain this
10 # copyright notice, and modified files need to carry a notice indicating
11 # that they have been altered from the originals.
12
13 """Unitary overlap circuit."""
14
15 from qiskit.circuit import QuantumCircuit, Gate
16 from qiskit.circuit.parametervector import ParameterVector
17 from qiskit.circuit.exceptions import CircuitError
18 from qiskit.circuit import Barrier
19
20
21 class UnitaryOverlap(QuantumCircuit):
22 r"""Circuit that returns the overlap between two unitaries :math:`U_2^{\dag} U_1`.
23
24 The input quantum circuits must represent unitary operations, since they must be invertible.
25 If the inputs will have parameters, they are replaced by :class:`.ParameterVector`\s with
26 names `"p1"` (for circuit ``unitary1``) and `"p2"` (for circuit ``unitary_2``) in the output
27 circuit.
28
29 This circuit is usually employed in computing the fidelity:
30
31 .. math::
32
33 \left|\langle 0| U_2^{\dag} U_1|0\rangle\right|^{2}
34
35 by computing the probability of being in the all-zeros bit-string, or equivalently,
36 the expectation value of projector :math:`|0\rangle\langle 0|`.
37
38 Example::
39
40 import numpy as np
41 from qiskit.circuit.library import EfficientSU2, UnitaryOverlap
42 from qiskit.primitives import Sampler
43
44 # get two circuit to prepare states of which we comput the overlap
45 circuit = EfficientSU2(2, reps=1)
46 unitary1 = circuit.assign_parameters(np.random.random(circuit.num_parameters))
47 unitary2 = circuit.assign_parameters(np.random.random(circuit.num_parameters))
48
49 # create the overlap circuit
50 overlap = UnitaryOverap(unitary1, unitary2)
51
52 # sample from the overlap
53 sampler = Sampler(options={"shots": 100})
54 result = sampler.run(overlap).result()
55
56 # the fidelity is the probability to measure 0
57 fidelity = result.quasi_dists[0].get(0, 0)
58
59 """
60
61 def __init__(
62 self, unitary1: QuantumCircuit, unitary2: QuantumCircuit, prefix1="p1", prefix2="p2"
63 ):
64 """
65 Args:
66 unitary1: Unitary acting on the ket vector.
67 unitary2: Unitary whose inverse operates on the bra vector.
68 prefix1: The name of the parameter vector associated to ``unitary1``,
69 if it is parameterized. Defaults to ``"p1"``.
70 prefix2: The name of the parameter vector associated to ``unitary2``,
71 if it is parameterized. Defaults to ``"p2"``.
72
73 Raises:
74 CircuitError: Number of qubits in ``unitary1`` and ``unitary2`` does not match.
75 CircuitError: Inputs contain measurements and/or resets.
76 """
77 # check inputs are valid
78 if unitary1.num_qubits != unitary2.num_qubits:
79 raise CircuitError(
80 f"Number of qubits in unitaries does "
81 f"not match: {unitary1.num_qubits} != {unitary2.num_qubits}."
82 )
83
84 unitaries = [unitary1, unitary2]
85 for unitary in unitaries:
86 _check_unitary(unitary)
87
88 # Vectors of new parameters, if any. Need the unitaries in a list here to ensure
89 # we can overwrite them.
90 for i, prefix in enumerate([prefix1, prefix2]):
91 if unitaries[i].num_parameters > 0:
92 new_params = ParameterVector(prefix, unitaries[i].num_parameters)
93 unitaries[i] = unitaries[i].assign_parameters(new_params)
94
95 # Generate the actual overlap circuit
96 super().__init__(unitaries[0].num_qubits, name="UnitaryOverlap")
97 self.compose(unitaries[0], inplace=True)
98 self.compose(unitaries[1].inverse(), inplace=True)
99
100
101 def _check_unitary(circuit):
102 """Check a circuit is unitary by checking if all operations are of type ``Gate``."""
103
104 for instruction in circuit.data:
105 if not isinstance(instruction.operation, (Gate, Barrier)):
106 raise CircuitError(
107 (
108 "One or more instructions cannot be converted to"
109 ' a gate. "{}" is not a gate instruction'
110 ).format(instruction.operation.name)
111 )
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/circuit/library/overlap.py b/qiskit/circuit/library/overlap.py
--- a/qiskit/circuit/library/overlap.py
+++ b/qiskit/circuit/library/overlap.py
@@ -59,7 +59,12 @@
"""
def __init__(
- self, unitary1: QuantumCircuit, unitary2: QuantumCircuit, prefix1="p1", prefix2="p2"
+ self,
+ unitary1: QuantumCircuit,
+ unitary2: QuantumCircuit,
+ prefix1: str = "p1",
+ prefix2: str = "p2",
+ insert_barrier: bool = False,
):
"""
Args:
@@ -69,6 +74,7 @@
if it is parameterized. Defaults to ``"p1"``.
prefix2: The name of the parameter vector associated to ``unitary2``,
if it is parameterized. Defaults to ``"p2"``.
+ insert_barrier: Whether to insert a barrier between the two unitaries.
Raises:
CircuitError: Number of qubits in ``unitary1`` and ``unitary2`` does not match.
@@ -95,6 +101,8 @@
# Generate the actual overlap circuit
super().__init__(unitaries[0].num_qubits, name="UnitaryOverlap")
self.compose(unitaries[0], inplace=True)
+ if insert_barrier:
+ self.barrier()
self.compose(unitaries[1].inverse(), inplace=True)
| {"golden_diff": "diff --git a/qiskit/circuit/library/overlap.py b/qiskit/circuit/library/overlap.py\n--- a/qiskit/circuit/library/overlap.py\n+++ b/qiskit/circuit/library/overlap.py\n@@ -59,7 +59,12 @@\n \"\"\"\n \n def __init__(\n- self, unitary1: QuantumCircuit, unitary2: QuantumCircuit, prefix1=\"p1\", prefix2=\"p2\"\n+ self,\n+ unitary1: QuantumCircuit,\n+ unitary2: QuantumCircuit,\n+ prefix1: str = \"p1\",\n+ prefix2: str = \"p2\",\n+ insert_barrier: bool = False,\n ):\n \"\"\"\n Args:\n@@ -69,6 +74,7 @@\n if it is parameterized. Defaults to ``\"p1\"``.\n prefix2: The name of the parameter vector associated to ``unitary2``,\n if it is parameterized. Defaults to ``\"p2\"``.\n+ insert_barrier: Whether to insert a barrier between the two unitaries.\n \n Raises:\n CircuitError: Number of qubits in ``unitary1`` and ``unitary2`` does not match.\n@@ -95,6 +101,8 @@\n # Generate the actual overlap circuit\n super().__init__(unitaries[0].num_qubits, name=\"UnitaryOverlap\")\n self.compose(unitaries[0], inplace=True)\n+ if insert_barrier:\n+ self.barrier()\n self.compose(unitaries[1].inverse(), inplace=True)\n", "issue": "Add insert_barrier argument to UnitaryOverlap\n### What should we add?\n\nThis argument would insert a barrier between the two unitaries. This is useful if you want to prevent circuit optimization between the two parts.\n", "before_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2023.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Unitary overlap circuit.\"\"\"\n\nfrom qiskit.circuit import QuantumCircuit, Gate\nfrom qiskit.circuit.parametervector import ParameterVector\nfrom qiskit.circuit.exceptions import CircuitError\nfrom qiskit.circuit import Barrier\n\n\nclass UnitaryOverlap(QuantumCircuit):\n r\"\"\"Circuit that returns the overlap between two unitaries :math:`U_2^{\\dag} U_1`.\n\n The input quantum circuits must represent unitary operations, since they must be invertible.\n If the inputs will have parameters, they are replaced by :class:`.ParameterVector`\\s with\n names `\"p1\"` (for circuit ``unitary1``) and `\"p2\"` (for circuit ``unitary_2``) in the output\n circuit.\n\n This circuit is usually employed in computing the fidelity:\n\n .. 
math::\n\n \\left|\\langle 0| U_2^{\\dag} U_1|0\\rangle\\right|^{2}\n\n by computing the probability of being in the all-zeros bit-string, or equivalently,\n the expectation value of projector :math:`|0\\rangle\\langle 0|`.\n\n Example::\n\n import numpy as np\n from qiskit.circuit.library import EfficientSU2, UnitaryOverlap\n from qiskit.primitives import Sampler\n\n # get two circuit to prepare states of which we comput the overlap\n circuit = EfficientSU2(2, reps=1)\n unitary1 = circuit.assign_parameters(np.random.random(circuit.num_parameters))\n unitary2 = circuit.assign_parameters(np.random.random(circuit.num_parameters))\n\n # create the overlap circuit\n overlap = UnitaryOverap(unitary1, unitary2)\n\n # sample from the overlap\n sampler = Sampler(options={\"shots\": 100})\n result = sampler.run(overlap).result()\n\n # the fidelity is the probability to measure 0\n fidelity = result.quasi_dists[0].get(0, 0)\n\n \"\"\"\n\n def __init__(\n self, unitary1: QuantumCircuit, unitary2: QuantumCircuit, prefix1=\"p1\", prefix2=\"p2\"\n ):\n \"\"\"\n Args:\n unitary1: Unitary acting on the ket vector.\n unitary2: Unitary whose inverse operates on the bra vector.\n prefix1: The name of the parameter vector associated to ``unitary1``,\n if it is parameterized. Defaults to ``\"p1\"``.\n prefix2: The name of the parameter vector associated to ``unitary2``,\n if it is parameterized. Defaults to ``\"p2\"``.\n\n Raises:\n CircuitError: Number of qubits in ``unitary1`` and ``unitary2`` does not match.\n CircuitError: Inputs contain measurements and/or resets.\n \"\"\"\n # check inputs are valid\n if unitary1.num_qubits != unitary2.num_qubits:\n raise CircuitError(\n f\"Number of qubits in unitaries does \"\n f\"not match: {unitary1.num_qubits} != {unitary2.num_qubits}.\"\n )\n\n unitaries = [unitary1, unitary2]\n for unitary in unitaries:\n _check_unitary(unitary)\n\n # Vectors of new parameters, if any. Need the unitaries in a list here to ensure\n # we can overwrite them.\n for i, prefix in enumerate([prefix1, prefix2]):\n if unitaries[i].num_parameters > 0:\n new_params = ParameterVector(prefix, unitaries[i].num_parameters)\n unitaries[i] = unitaries[i].assign_parameters(new_params)\n\n # Generate the actual overlap circuit\n super().__init__(unitaries[0].num_qubits, name=\"UnitaryOverlap\")\n self.compose(unitaries[0], inplace=True)\n self.compose(unitaries[1].inverse(), inplace=True)\n\n\ndef _check_unitary(circuit):\n \"\"\"Check a circuit is unitary by checking if all operations are of type ``Gate``.\"\"\"\n\n for instruction in circuit.data:\n if not isinstance(instruction.operation, (Gate, Barrier)):\n raise CircuitError(\n (\n \"One or more instructions cannot be converted to\"\n ' a gate. \"{}\" is not a gate instruction'\n ).format(instruction.operation.name)\n )\n", "path": "qiskit/circuit/library/overlap.py"}], "after_files": [{"content": "# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2023.\n#\n# This code is licensed under the Apache License, Version 2.0. 
You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Unitary overlap circuit.\"\"\"\n\nfrom qiskit.circuit import QuantumCircuit, Gate\nfrom qiskit.circuit.parametervector import ParameterVector\nfrom qiskit.circuit.exceptions import CircuitError\nfrom qiskit.circuit import Barrier\n\n\nclass UnitaryOverlap(QuantumCircuit):\n r\"\"\"Circuit that returns the overlap between two unitaries :math:`U_2^{\\dag} U_1`.\n\n The input quantum circuits must represent unitary operations, since they must be invertible.\n If the inputs will have parameters, they are replaced by :class:`.ParameterVector`\\s with\n names `\"p1\"` (for circuit ``unitary1``) and `\"p2\"` (for circuit ``unitary_2``) in the output\n circuit.\n\n This circuit is usually employed in computing the fidelity:\n\n .. math::\n\n \\left|\\langle 0| U_2^{\\dag} U_1|0\\rangle\\right|^{2}\n\n by computing the probability of being in the all-zeros bit-string, or equivalently,\n the expectation value of projector :math:`|0\\rangle\\langle 0|`.\n\n Example::\n\n import numpy as np\n from qiskit.circuit.library import EfficientSU2, UnitaryOverlap\n from qiskit.primitives import Sampler\n\n # get two circuit to prepare states of which we comput the overlap\n circuit = EfficientSU2(2, reps=1)\n unitary1 = circuit.assign_parameters(np.random.random(circuit.num_parameters))\n unitary2 = circuit.assign_parameters(np.random.random(circuit.num_parameters))\n\n # create the overlap circuit\n overlap = UnitaryOverap(unitary1, unitary2)\n\n # sample from the overlap\n sampler = Sampler(options={\"shots\": 100})\n result = sampler.run(overlap).result()\n\n # the fidelity is the probability to measure 0\n fidelity = result.quasi_dists[0].get(0, 0)\n\n \"\"\"\n\n def __init__(\n self,\n unitary1: QuantumCircuit,\n unitary2: QuantumCircuit,\n prefix1: str = \"p1\",\n prefix2: str = \"p2\",\n insert_barrier: bool = False,\n ):\n \"\"\"\n Args:\n unitary1: Unitary acting on the ket vector.\n unitary2: Unitary whose inverse operates on the bra vector.\n prefix1: The name of the parameter vector associated to ``unitary1``,\n if it is parameterized. Defaults to ``\"p1\"``.\n prefix2: The name of the parameter vector associated to ``unitary2``,\n if it is parameterized. Defaults to ``\"p2\"``.\n insert_barrier: Whether to insert a barrier between the two unitaries.\n\n Raises:\n CircuitError: Number of qubits in ``unitary1`` and ``unitary2`` does not match.\n CircuitError: Inputs contain measurements and/or resets.\n \"\"\"\n # check inputs are valid\n if unitary1.num_qubits != unitary2.num_qubits:\n raise CircuitError(\n f\"Number of qubits in unitaries does \"\n f\"not match: {unitary1.num_qubits} != {unitary2.num_qubits}.\"\n )\n\n unitaries = [unitary1, unitary2]\n for unitary in unitaries:\n _check_unitary(unitary)\n\n # Vectors of new parameters, if any. 
Need the unitaries in a list here to ensure\n # we can overwrite them.\n for i, prefix in enumerate([prefix1, prefix2]):\n if unitaries[i].num_parameters > 0:\n new_params = ParameterVector(prefix, unitaries[i].num_parameters)\n unitaries[i] = unitaries[i].assign_parameters(new_params)\n\n # Generate the actual overlap circuit\n super().__init__(unitaries[0].num_qubits, name=\"UnitaryOverlap\")\n self.compose(unitaries[0], inplace=True)\n if insert_barrier:\n self.barrier()\n self.compose(unitaries[1].inverse(), inplace=True)\n\n\ndef _check_unitary(circuit):\n \"\"\"Check a circuit is unitary by checking if all operations are of type ``Gate``.\"\"\"\n\n for instruction in circuit.data:\n if not isinstance(instruction.operation, (Gate, Barrier)):\n raise CircuitError(\n (\n \"One or more instructions cannot be converted to\"\n ' a gate. \"{}\" is not a gate instruction'\n ).format(instruction.operation.name)\n )\n", "path": "qiskit/circuit/library/overlap.py"}]} | 1,600 | 346 |
gh_patches_debug_60680 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1798 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ebola page: loading second page of datasets reloads to top of page
Would it be easy to have it load the page at the `Datasets [41]` line?
--- END ISSUE ---
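For context, the diff below appends `#datasets-section` to the pager URL so the browser lands on the datasets list rather than the top of the page. A tiny standalone sketch of the idea (helper name and base path are illustrative only):
```python
# Hypothetical stand-in for the controller's pager_url() helper.
def pager_url(page):
    base = "/ebola?page=" + str(page)   # stand-in for h.url_for('show_crisis', page=page)
    return base + "#datasets-section"   # browser scrolls to the element with that id

print(pager_url(2))  # /ebola?page=2#datasets-section
```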
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py`
Content:
```
1 '''
2 Created on Nov 3, 2014
3
4 @author: alexandru-m-g
5 '''
6
7 import logging
8 import datetime as dt
9 import decimal
10
11 import pylons.config as config
12
13 import ckan.lib.base as base
14 import ckan.logic as logic
15 import ckan.model as model
16 import ckan.common as common
17 import ckan.lib.helpers as h
18
19 render = base.render
20 get_action = logic.get_action
21 c = common.c
22 request = common.request
23 _ = common._
24
25 Decimal = decimal.Decimal
26
27 log = logging.getLogger(__name__)
28
29
30 class CrisisController(base.BaseController):
31
32 def show(self):
33
34 context = {'model': model, 'session': model.Session,
35 'user': c.user or c.author, 'for_view': True,
36 'auth_user_obj': c.userobj}
37
38 datastore_resource_id = self._get_datastore_resource_id(
39 context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))
40 if datastore_resource_id:
41 c.top_line_items = self._get_top_line_items(
42 context, datastore_resource_id)
43
44 limit = 25
45 c.q = u'ebola'
46
47 page = int(request.params.get('page', 1))
48 data_dict = {'sort': u'metadata_modified desc',
49 'fq': '+dataset_type:dataset',
50 'rows': limit,
51 'q': c.q,
52 'start': (page - 1) * limit
53 }
54 query = get_action("package_search")(context, data_dict)
55
56 def pager_url(q=None, page=None):
57 return h.url_for('show_crisis', page=page)
58
59 c.page = h.Page(
60 collection=query['results'],
61 page=page,
62 url=pager_url,
63 item_count=query['count'],
64 items_per_page=limit
65 )
66 c.items = query['results']
67 c.item_count = query['count']
68
69 c.other_links = {}
70 c.other_links['show_more'] = h.url_for(
71 "search", **{'q': u'ebola', 'sort': u'metadata_modified desc',
72 'ext_indicator': '0'})
73
74 return render('crisis/crisis.html')
75
76 def _get_decimal_value(self, value):
77 decimal_value = Decimal(str(value)).quantize(
78 Decimal('.1'), rounding=decimal.ROUND_HALF_UP)
79 return decimal_value
80
81 def _format_results(self, result):
82 for r in result['records']:
83 d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')
84 r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')
85
86 modified_value = r[u'value']
87 if r[u'units'] == 'ratio':
88 modified_value *= 100.0
89 elif r[u'units'] == 'million':
90 modified_value /= 1000000.0
91
92 int_value = int(modified_value)
93 if int_value == modified_value:
94 r[u'formatted_value'] = '{:,}'.format(int_value)
95 else:
96 if r[u'units'] == 'ratio':
97 r[u'formatted_value'] = '{:,.1f}'.format(
98 self._get_decimal_value(modified_value))
99 elif r[u'units'] == 'million':
100 r[u'formatted_value'] = '{:,.1f}'.format(
101 self._get_decimal_value(modified_value))
102 #r[u'formatted_value'] += ' ' + _('million')
103
104 def _get_top_line_items(self, context, datastore_resource_id):
105 modified_context = dict(context)
106 modified_context['ignore_auth'] = True
107 result = get_action('datastore_search')(
108 modified_context, {'resource_id': datastore_resource_id})
109 if 'records' in result:
110 self._format_results(result)
111 return result['records']
112 return []
113
114 def _get_datastore_resource_id(self, context, dataset_id, resource_name):
115 try:
116 modified_context = dict(context)
117 modified_context['ignore_auth'] = True
118 dataset = get_action('package_show')(
119 modified_context, {'id': dataset_id})
120
121 if 'resources' in dataset:
122 for r in dataset['resources']:
123 if 'datastore_active' in r and r['datastore_active'] \
124 and r['name'] == resource_name:
125 return r['id']
126 return None
127 except:
128 log.warning('No dataset with id ' + dataset_id)
129 return None
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
@@ -54,7 +54,8 @@
query = get_action("package_search")(context, data_dict)
def pager_url(q=None, page=None):
- return h.url_for('show_crisis', page=page)
+ url = h.url_for('show_crisis', page=page) + '#datasets-section'
+ return url
c.page = h.Page(
collection=query['results'],
| {"golden_diff": "diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py\n@@ -54,7 +54,8 @@\n query = get_action(\"package_search\")(context, data_dict)\n \n def pager_url(q=None, page=None):\n- return h.url_for('show_crisis', page=page)\n+ url = h.url_for('show_crisis', page=page) + '#datasets-section'\n+ return url\n \n c.page = h.Page(\n collection=query['results'],\n", "issue": "Ebola page: loading second page of datasets reloads to top of page\nWould it be easy to have it load the page at the `Datasets [41]` line?\n\n", "before_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\nimport datetime as dt\nimport decimal\n\nimport pylons.config as config\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\nDecimal = decimal.Decimal\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n return h.url_for('show_crisis', page=page)\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_decimal_value(self, value):\n decimal_value = Decimal(str(value)).quantize(\n Decimal('.1'), rounding=decimal.ROUND_HALF_UP)\n return decimal_value\n\n def _format_results(self, result):\n for r in result['records']:\n d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n\n modified_value = r[u'value']\n if r[u'units'] == 'ratio':\n modified_value *= 100.0\n elif r[u'units'] == 'million':\n modified_value /= 1000000.0\n\n int_value = int(modified_value)\n if int_value == modified_value:\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n elif r[u'units'] == 'million':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n #r[u'formatted_value'] += ' ' + _('million')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = 
dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n self._format_results(result)\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}], "after_files": [{"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\nimport datetime as dt\nimport decimal\n\nimport pylons.config as config\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\nDecimal = decimal.Decimal\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n url = h.url_for('show_crisis', page=page) + '#datasets-section'\n return url\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_decimal_value(self, value):\n decimal_value = Decimal(str(value)).quantize(\n Decimal('.1'), rounding=decimal.ROUND_HALF_UP)\n return decimal_value\n\n def _format_results(self, result):\n for r in result['records']:\n d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n\n modified_value = r[u'value']\n if r[u'units'] == 'ratio':\n modified_value *= 100.0\n elif r[u'units'] == 'million':\n modified_value /= 1000000.0\n\n int_value = int(modified_value)\n if int_value == modified_value:\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n elif r[u'units'] == 'million':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n #r[u'formatted_value'] += ' ' + _('million')\n\n 
def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n self._format_results(result)\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n", "path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"}]} | 1,590 | 201 |
gh_patches_debug_7958 | rasdani/github-patches | git_diff | apache__tvm-13442 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug][ci] Deploy docs is busted
See for example https://ci.tlcpack.ai/blue/organizations/jenkins/tvm/detail/main/4756/pipeline
```
Traceback (most recent call last):
File "./ci/scripts/jenkins/s3.py", line 134, in <module>
chmod(files)
File "./ci/scripts/jenkins/s3.py", line 70, in chmod
SH.run(f"chmod +x {' '.join(to_chmod)}")
File "/tmp/jenkins-ba6c252c/workspace/exec_0/tvm/deploy-docs/ci/scripts/jenkins/cmd_utils.py", line 78, in run
return subprocess.run(cmd, **defaults)
File "/usr/lib/python3.8/subprocess.py", line 493, in run
with Popen(*popenargs, **kwargs) as process:
File "/usr/lib/python3.8/subprocess.py", line 858, in __init__
self._execute_child(args, executable, preexec_fn, close_fds,
File "/usr/lib/python3.8/subprocess.py", line 1704, in _execute_child
raise child_exception_type(errno_num, err_msg, err_filename)
OSError: [Errno 7] Argument list too long: '/bin/sh'
```
cc @Mousius @driazati @gigiblender
--- END ISSUE ---
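For context on the failure mode before the code listings: the traceback shows a single `chmod +x ...` command being built with every downloaded path joined into one argument string, and once that string outgrows the kernel's `ARG_MAX` limit the `/bin/sh` exec fails with `Argument list too long`. The sketch below is one illustrative way to sidestep the limit entirely — applying the execute bit per file through `os.chmod`, so no shell command line is assembled at all. It is not the fix the repository adopted (the golden diff further down instead downloads each requested item individually, so far fewer files reach the chmod step); the helper name here is made up for the example.
```python
import os
import stat
from typing import List


def add_execute_bit(files: List[str]) -> None:
    """Illustrative helper: set the execute bit on each file directly.

    os.chmod() is a plain system call per file, so no shell command line
    is built and the ARG_MAX limit can never be exceeded, regardless of
    how many files were downloaded.
    """
    for path in files:
        mode = os.stat(path).st_mode
        os.chmod(path, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
```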
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ci/scripts/jenkins/s3.py`
Content:
```
1 #!/usr/bin/env python3
2 # Licensed to the Apache Software Foundation (ASF) under one
3 # or more contributor license agreements. See the NOTICE file
4 # distributed with this work for additional information
5 # regarding copyright ownership. The ASF licenses this file
6 # to you under the Apache License, Version 2.0 (the
7 # "License"); you may not use this file except in compliance
8 # with the License. You may obtain a copy of the License at
9 #
10 # http://www.apache.org/licenses/LICENSE-2.0
11 #
12 # Unless required by applicable law or agreed to in writing,
13 # software distributed under the License is distributed on an
14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 # KIND, either express or implied. See the License for the
16 # specific language governing permissions and limitations
17 # under the License.
18
19 import argparse
20 import logging
21 import re
22 from pathlib import Path
23 from typing import List
24 from enum import Enum
25
26 from cmd_utils import Sh, REPO_ROOT, init_log
27
28 RETRY_SCRIPT = REPO_ROOT / "ci" / "scripts" / "jenkins" / "retry.sh"
29 S3_DOWNLOAD_REGEX = re.compile(r"download: s3://.* to (.*)")
30 SH = Sh()
31
32
33 class Action(Enum):
34 UPLOAD = 1
35 DOWNLOAD = 2
36
37
38 def show_md5(item: str) -> None:
39 if not Path(item).is_dir():
40 sh.run(f"md5sum {item}")
41
42
43 def parse_output_files(stdout: str) -> List[str]:
44 """
45 Grab the list of downloaded files from the output of 'aws s3 cp'. Lines look
46 like:
47
48 download: s3://some/prefix/a_file.txt to a_file.txt
49 """
50 files = []
51 for line in stdout.split("\n"):
52 line = line.strip()
53 if line == "":
54 continue
55 m = S3_DOWNLOAD_REGEX.match(line)
56 if m:
57 files.append(m.groups()[0])
58
59 return files
60
61
62 def chmod(files: List[str]) -> None:
63 """
64 S3 has no concept of file permissions so add them back in here to every file
65 """
66 # Add execute bit for downloads
67 to_chmod = [str(f) for f in files]
68 logging.info(f"Adding execute bit for files: {to_chmod}")
69 if len(to_chmod) > 0:
70 SH.run(f"chmod +x {' '.join(to_chmod)}")
71
72
73 def s3(source: str, destination: str, recursive: bool) -> List[str]:
74 """
75 Send or download the source to the destination in S3
76 """
77 cmd = f". {RETRY_SCRIPT.relative_to(REPO_ROOT)} && retry 3 aws s3 cp --no-progress"
78
79 if recursive:
80 cmd += " --recursive"
81
82 cmd += f" {source} {destination}"
83 _, stdout = SH.tee(cmd)
84 return stdout
85
86
87 if __name__ == "__main__":
88 init_log()
89 help = "Uploads or downloads files from S3"
90 parser = argparse.ArgumentParser(description=help)
91 parser.add_argument("--action", help="either 'upload' or 'download'", required=True)
92 parser.add_argument("--bucket", help="s3 bucket", required=True)
93 parser.add_argument(
94 "--prefix", help="s3 bucket + tag (e.g. s3://tvm-ci-prod/PR-1234/cpu", required=True
95 )
96 parser.add_argument("--items", help="files and folders to upload", nargs="+")
97
98 args = parser.parse_args()
99 logging.info(args)
100
101 sh = Sh()
102
103 if Path.cwd() != REPO_ROOT:
104 logging.error(f"s3.py can only be executed from the repo root, instead was in {Path.cwd()}")
105 exit(1)
106
107 prefix = args.prefix.strip("/")
108 s3_path = f"s3://{args.bucket}/{prefix}"
109 logging.info(f"Using s3 path: {s3_path}")
110
111 if args.action == "upload":
112 action = Action.UPLOAD
113 elif args.action == "download":
114 action = Action.DOWNLOAD
115 else:
116 logging.error(f"Unsupported action: {args.action}")
117 exit(1)
118
119 if args.items is None:
120 if args.action == "upload":
121 logging.error(f"Cannot upload without --items")
122 exit(1)
123 else:
124 # Download the whole prefix
125 items = ["."]
126
127 else:
128 items = args.items
129
130 for item in items:
131 if action == Action.DOWNLOAD:
132 stdout = s3(source=s3_path, destination=item, recursive=True)
133 files = parse_output_files(stdout)
134 chmod(files)
135 for file in files:
136 # Show md5 after downloading
137 show_md5(file)
138 elif action == Action.UPLOAD:
139 show_md5(item)
140 s3(item, s3_path + "/" + item, recursive=Path(item).is_dir())
141
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ci/scripts/jenkins/s3.py b/ci/scripts/jenkins/s3.py
--- a/ci/scripts/jenkins/s3.py
+++ b/ci/scripts/jenkins/s3.py
@@ -129,7 +129,12 @@
for item in items:
if action == Action.DOWNLOAD:
- stdout = s3(source=s3_path, destination=item, recursive=True)
+ source = s3_path
+ recursive = True
+ if item != ".":
+ source = s3_path + "/" + item
+ recursive = False
+ stdout = s3(source=source, destination=item, recursive=recursive)
files = parse_output_files(stdout)
chmod(files)
for file in files:
| {"golden_diff": "diff --git a/ci/scripts/jenkins/s3.py b/ci/scripts/jenkins/s3.py\n--- a/ci/scripts/jenkins/s3.py\n+++ b/ci/scripts/jenkins/s3.py\n@@ -129,7 +129,12 @@\n \n for item in items:\n if action == Action.DOWNLOAD:\n- stdout = s3(source=s3_path, destination=item, recursive=True)\n+ source = s3_path\n+ recursive = True\n+ if item != \".\":\n+ source = s3_path + \"/\" + item\n+ recursive = False\n+ stdout = s3(source=source, destination=item, recursive=recursive)\n files = parse_output_files(stdout)\n chmod(files)\n for file in files:\n", "issue": "[Bug][ci] Deploy docs is busted\nSee for example https://ci.tlcpack.ai/blue/organizations/jenkins/tvm/detail/main/4756/pipeline\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"./ci/scripts/jenkins/s3.py\", line 134, in <module>\r\n chmod(files)\r\n File \"./ci/scripts/jenkins/s3.py\", line 70, in chmod\r\n SH.run(f\"chmod +x {' '.join(to_chmod)}\")\r\n File \"/tmp/jenkins-ba6c252c/workspace/exec_0/tvm/deploy-docs/ci/scripts/jenkins/cmd_utils.py\", line 78, in run\r\n return subprocess.run(cmd, **defaults)\r\n File \"/usr/lib/python3.8/subprocess.py\", line 493, in run\r\n with Popen(*popenargs, **kwargs) as process:\r\n File \"/usr/lib/python3.8/subprocess.py\", line 858, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"/usr/lib/python3.8/subprocess.py\", line 1704, in _execute_child\r\n raise child_exception_type(errno_num, err_msg, err_filename)\r\nOSError: [Errno 7] Argument list too long: '/bin/sh'\r\n```\n\ncc @Mousius @driazati @gigiblender\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport argparse\nimport logging\nimport re\nfrom pathlib import Path\nfrom typing import List\nfrom enum import Enum\n\nfrom cmd_utils import Sh, REPO_ROOT, init_log\n\nRETRY_SCRIPT = REPO_ROOT / \"ci\" / \"scripts\" / \"jenkins\" / \"retry.sh\"\nS3_DOWNLOAD_REGEX = re.compile(r\"download: s3://.* to (.*)\")\nSH = Sh()\n\n\nclass Action(Enum):\n UPLOAD = 1\n DOWNLOAD = 2\n\n\ndef show_md5(item: str) -> None:\n if not Path(item).is_dir():\n sh.run(f\"md5sum {item}\")\n\n\ndef parse_output_files(stdout: str) -> List[str]:\n \"\"\"\n Grab the list of downloaded files from the output of 'aws s3 cp'. 
Lines look\n like:\n\n download: s3://some/prefix/a_file.txt to a_file.txt\n \"\"\"\n files = []\n for line in stdout.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n continue\n m = S3_DOWNLOAD_REGEX.match(line)\n if m:\n files.append(m.groups()[0])\n\n return files\n\n\ndef chmod(files: List[str]) -> None:\n \"\"\"\n S3 has no concept of file permissions so add them back in here to every file\n \"\"\"\n # Add execute bit for downloads\n to_chmod = [str(f) for f in files]\n logging.info(f\"Adding execute bit for files: {to_chmod}\")\n if len(to_chmod) > 0:\n SH.run(f\"chmod +x {' '.join(to_chmod)}\")\n\n\ndef s3(source: str, destination: str, recursive: bool) -> List[str]:\n \"\"\"\n Send or download the source to the destination in S3\n \"\"\"\n cmd = f\". {RETRY_SCRIPT.relative_to(REPO_ROOT)} && retry 3 aws s3 cp --no-progress\"\n\n if recursive:\n cmd += \" --recursive\"\n\n cmd += f\" {source} {destination}\"\n _, stdout = SH.tee(cmd)\n return stdout\n\n\nif __name__ == \"__main__\":\n init_log()\n help = \"Uploads or downloads files from S3\"\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument(\"--action\", help=\"either 'upload' or 'download'\", required=True)\n parser.add_argument(\"--bucket\", help=\"s3 bucket\", required=True)\n parser.add_argument(\n \"--prefix\", help=\"s3 bucket + tag (e.g. s3://tvm-ci-prod/PR-1234/cpu\", required=True\n )\n parser.add_argument(\"--items\", help=\"files and folders to upload\", nargs=\"+\")\n\n args = parser.parse_args()\n logging.info(args)\n\n sh = Sh()\n\n if Path.cwd() != REPO_ROOT:\n logging.error(f\"s3.py can only be executed from the repo root, instead was in {Path.cwd()}\")\n exit(1)\n\n prefix = args.prefix.strip(\"/\")\n s3_path = f\"s3://{args.bucket}/{prefix}\"\n logging.info(f\"Using s3 path: {s3_path}\")\n\n if args.action == \"upload\":\n action = Action.UPLOAD\n elif args.action == \"download\":\n action = Action.DOWNLOAD\n else:\n logging.error(f\"Unsupported action: {args.action}\")\n exit(1)\n\n if args.items is None:\n if args.action == \"upload\":\n logging.error(f\"Cannot upload without --items\")\n exit(1)\n else:\n # Download the whole prefix\n items = [\".\"]\n\n else:\n items = args.items\n\n for item in items:\n if action == Action.DOWNLOAD:\n stdout = s3(source=s3_path, destination=item, recursive=True)\n files = parse_output_files(stdout)\n chmod(files)\n for file in files:\n # Show md5 after downloading\n show_md5(file)\n elif action == Action.UPLOAD:\n show_md5(item)\n s3(item, s3_path + \"/\" + item, recursive=Path(item).is_dir())\n", "path": "ci/scripts/jenkins/s3.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport argparse\nimport logging\nimport re\nfrom pathlib import Path\nfrom typing import List\nfrom enum import Enum\n\nfrom cmd_utils import Sh, REPO_ROOT, init_log\n\nRETRY_SCRIPT = REPO_ROOT / \"ci\" / \"scripts\" / \"jenkins\" / \"retry.sh\"\nS3_DOWNLOAD_REGEX = re.compile(r\"download: s3://.* to (.*)\")\nSH = Sh()\n\n\nclass Action(Enum):\n UPLOAD = 1\n DOWNLOAD = 2\n\n\ndef show_md5(item: str) -> None:\n if not Path(item).is_dir():\n sh.run(f\"md5sum {item}\")\n\n\ndef parse_output_files(stdout: str) -> List[str]:\n \"\"\"\n Grab the list of downloaded files from the output of 'aws s3 cp'. Lines look\n like:\n\n download: s3://some/prefix/a_file.txt to a_file.txt\n \"\"\"\n files = []\n for line in stdout.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n continue\n m = S3_DOWNLOAD_REGEX.match(line)\n if m:\n files.append(m.groups()[0])\n\n return files\n\n\ndef chmod(files: List[str]) -> None:\n \"\"\"\n S3 has no concept of file permissions so add them back in here to every file\n \"\"\"\n # Add execute bit for downloads\n to_chmod = [str(f) for f in files]\n logging.info(f\"Adding execute bit for files: {to_chmod}\")\n if len(to_chmod) > 0:\n SH.run(f\"chmod +x {' '.join(to_chmod)}\")\n\n\ndef s3(source: str, destination: str, recursive: bool) -> List[str]:\n \"\"\"\n Send or download the source to the destination in S3\n \"\"\"\n cmd = f\". {RETRY_SCRIPT.relative_to(REPO_ROOT)} && retry 3 aws s3 cp --no-progress\"\n\n if recursive:\n cmd += \" --recursive\"\n\n cmd += f\" {source} {destination}\"\n _, stdout = SH.tee(cmd)\n return stdout\n\n\nif __name__ == \"__main__\":\n init_log()\n help = \"Uploads or downloads files from S3\"\n parser = argparse.ArgumentParser(description=help)\n parser.add_argument(\"--action\", help=\"either 'upload' or 'download'\", required=True)\n parser.add_argument(\"--bucket\", help=\"s3 bucket\", required=True)\n parser.add_argument(\n \"--prefix\", help=\"s3 bucket + tag (e.g. s3://tvm-ci-prod/PR-1234/cpu\", required=True\n )\n parser.add_argument(\"--items\", help=\"files and folders to upload\", nargs=\"+\")\n\n args = parser.parse_args()\n logging.info(args)\n\n sh = Sh()\n\n if Path.cwd() != REPO_ROOT:\n logging.error(f\"s3.py can only be executed from the repo root, instead was in {Path.cwd()}\")\n exit(1)\n\n prefix = args.prefix.strip(\"/\")\n s3_path = f\"s3://{args.bucket}/{prefix}\"\n logging.info(f\"Using s3 path: {s3_path}\")\n\n if args.action == \"upload\":\n action = Action.UPLOAD\n elif args.action == \"download\":\n action = Action.DOWNLOAD\n else:\n logging.error(f\"Unsupported action: {args.action}\")\n exit(1)\n\n if args.items is None:\n if args.action == \"upload\":\n logging.error(f\"Cannot upload without --items\")\n exit(1)\n else:\n # Download the whole prefix\n items = [\".\"]\n\n else:\n items = args.items\n\n for item in items:\n if action == Action.DOWNLOAD:\n source = s3_path\n recursive = True\n if item != \".\":\n source = s3_path + \"/\" + item\n recursive = False\n stdout = s3(source=source, destination=item, recursive=recursive)\n files = parse_output_files(stdout)\n chmod(files)\n for file in files:\n # Show md5 after downloading\n show_md5(file)\n elif action == Action.UPLOAD:\n show_md5(item)\n s3(item, s3_path + \"/\" + item, recursive=Path(item).is_dir())\n", "path": "ci/scripts/jenkins/s3.py"}]} | 1,956 | 165 |
gh_patches_debug_25151 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2933 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Malformed Links in Profiles and Posts
**Describe the bug**
When you write a URL at the end of a sentence in your profile or in a post, the trailing dot (.) is appended to the generated HTML anchor.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to your profile
2. Click on 'edit profile'
3. Insert "https://github.com/bookwyrm-social/bookwyrm/issues." (with the dot at the end)
4. Click on "save"
5. Follow the Link
**Example**
https://bookwyrm.social/user/taarion
**Expected behavior**
The Link should be without the dot
**Instance**
bookwyrm.social, for example
---
**Desktop (please complete the following information):**
- OS: Windows 11
- Browser Vivaldi
- Version 6.1
--- END ISSUE ---
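To make the bug concrete before the code listings: `format_links` splits the status text on whitespace, so a token such as `https://example.com/issues.` keeps its trailing dot, can still pass URL validation, and the dot ends up inside the generated anchor. The sketch below illustrates the general remedy — peel one trailing punctuation glyph off the token before validating, then re-attach it after the anchor. It is a simplified stand-in (a bare regex instead of Django's `URLValidator`), and the names are illustrative rather than BookWyrm's actual code; the golden diff further down applies the same idea inside `format_links` itself.
```python
import re

# Glyphs that commonly end a sentence right after a pasted URL (assumed set).
TRAILING_PUNCTUATION = '.,;:!?"»”’'


def linkify_token(token: str) -> str:
    """Turn a URL-looking token into an anchor, keeping trailing punctuation outside it."""
    trailing = ""
    if token and token[-1] in TRAILING_PUNCTUATION:
        trailing = token[-1]   # remember the glyph ...
        token = token[:-1]     # ... and validate the URL without it
    if re.match(r"https?://\S+$", token):
        return f'<a href="{token}">{token}</a>{trailing}'
    return token + trailing    # not a URL: emit the original text unchanged


# Example: linkify_token("https://example.com/issues.") returns
# '<a href="https://example.com/issues">https://example.com/issues</a>.'
```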
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/status.py`
Content:
```
1 """ what are we here for if not for posting """
2 import re
3 import logging
4 from urllib.parse import urlparse
5
6 from django.contrib.auth.decorators import login_required
7 from django.core.validators import URLValidator
8 from django.core.exceptions import ValidationError
9 from django.db.models import Q
10 from django.http import HttpResponse, HttpResponseBadRequest, Http404
11 from django.shortcuts import get_object_or_404, redirect
12 from django.template.response import TemplateResponse
13 from django.utils import timezone
14 from django.utils.decorators import method_decorator
15 from django.views import View
16 from django.views.decorators.http import require_POST
17
18 from markdown import markdown
19 from bookwyrm import forms, models
20 from bookwyrm.utils import regex, sanitizer
21 from .helpers import handle_remote_webfinger, is_api_request
22 from .helpers import load_date_in_user_tz_as_utc, redirect_to_referer
23
24 logger = logging.getLogger(__name__)
25
26
27 # pylint: disable= no-self-use
28 @method_decorator(login_required, name="dispatch")
29 class EditStatus(View):
30 """the view for *posting*"""
31
32 def get(self, request, status_id): # pylint: disable=unused-argument
33 """load the edit panel"""
34 status = get_object_or_404(
35 models.Status.objects.select_subclasses(), id=status_id
36 )
37
38 status_type = "reply" if status.reply_parent else status.status_type.lower()
39 data = {
40 "type": status_type,
41 "book": getattr(status, "book", None),
42 "draft": status,
43 }
44 return TemplateResponse(request, "compose.html", data)
45
46
47 # pylint: disable= no-self-use
48 @method_decorator(login_required, name="dispatch")
49 class CreateStatus(View):
50 """the view for *posting*"""
51
52 def get(self, request, status_type): # pylint: disable=unused-argument
53 """compose view (...not used?)"""
54 book = get_object_or_404(models.Edition, id=request.GET.get("book"))
55 data = {"book": book}
56 return TemplateResponse(request, "compose.html", data)
57
58 # pylint: disable=too-many-branches
59 def post(self, request, status_type, existing_status_id=None):
60 """create status of whatever type"""
61 created = not existing_status_id
62 existing_status = None
63 if existing_status_id:
64 existing_status = get_object_or_404(
65 models.Status.objects.select_subclasses(), id=existing_status_id
66 )
67 existing_status.edited_date = timezone.now()
68
69 status_type = status_type[0].upper() + status_type[1:]
70
71 try:
72 form = getattr(forms, f"{status_type}Form")(
73 request.POST, instance=existing_status
74 )
75 except AttributeError as err:
76 logger.exception(err)
77 return HttpResponseBadRequest()
78
79 if not form.is_valid():
80 if is_api_request(request):
81 logger.exception(form.errors)
82 return HttpResponseBadRequest()
83 return redirect_to_referer(request)
84
85 status = form.save(request, commit=False)
86 status.ready = False
87 # save the plain, unformatted version of the status for future editing
88 status.raw_content = status.content
89 if hasattr(status, "quote"):
90 status.raw_quote = status.quote
91
92 status.sensitive = status.content_warning not in [None, ""]
93 # the status has to be saved now before we can add many to many fields
94 # like mentions
95 status.save(broadcast=False)
96
97 # inspect the text for user tags
98 content = status.content
99 mentions = find_mentions(request.user, content)
100 for (_, mention_user) in mentions.items():
101 # add them to status mentions fk
102 status.mention_users.add(mention_user)
103 content = format_mentions(content, mentions)
104
105 # add reply parent to mentions
106 if status.reply_parent:
107 status.mention_users.add(status.reply_parent.user)
108
109 # inspect the text for hashtags
110 hashtags = find_or_create_hashtags(content)
111 for (_, mention_hashtag) in hashtags.items():
112 # add them to status mentions fk
113 status.mention_hashtags.add(mention_hashtag)
114 content = format_hashtags(content, hashtags)
115
116 # deduplicate mentions
117 status.mention_users.set(set(status.mention_users.all()))
118
119 # don't apply formatting to generated notes
120 if not isinstance(status, models.GeneratedNote) and content:
121 status.content = to_markdown(content)
122 # do apply formatting to quotes
123 if hasattr(status, "quote"):
124 status.quote = to_markdown(status.quote)
125
126 status.ready = True
127 status.save(created=created)
128
129 # update a readthrough, if needed
130 if bool(request.POST.get("id")):
131 try:
132 edit_readthrough(request)
133 except Http404:
134 pass
135
136 if is_api_request(request):
137 return HttpResponse()
138 return redirect_to_referer(request)
139
140
141 def format_mentions(content, mentions):
142 """Detect @mentions and make them links"""
143 for (mention_text, mention_user) in mentions.items():
144 # turn the mention into a link
145 content = re.sub(
146 rf"(?<!/)\B{mention_text}\b(?!@)",
147 rf'<a href="{mention_user.remote_id}">{mention_text}</a>',
148 content,
149 )
150 return content
151
152
153 def format_hashtags(content, hashtags):
154 """Detect #hashtags and make them links"""
155 for (mention_text, mention_hashtag) in hashtags.items():
156 # turn the mention into a link
157 content = re.sub(
158 rf"(?<!/)\B{mention_text}\b(?!@)",
159 rf'<a href="{mention_hashtag.remote_id}" data-mention="hashtag">'
160 + rf"{mention_text}</a>",
161 content,
162 )
163 return content
164
165
166 @method_decorator(login_required, name="dispatch")
167 class DeleteStatus(View):
168 """tombstone that bad boy"""
169
170 def post(self, request, status_id):
171 """delete and tombstone a status"""
172 status = get_object_or_404(models.Status, id=status_id)
173
174 # don't let people delete other people's statuses
175 status.raise_not_deletable(request.user)
176
177 # perform deletion
178 status.delete()
179 return redirect("/")
180
181
182 @login_required
183 @require_POST
184 def update_progress(request, book_id): # pylint: disable=unused-argument
185 """Either it's just a progress update, or it's a comment with a progress update"""
186 if request.POST.get("post-status"):
187 return CreateStatus.as_view()(request, "comment")
188 return edit_readthrough(request)
189
190
191 @login_required
192 @require_POST
193 def edit_readthrough(request):
194 """can't use the form because the dates are too finnicky"""
195 # TODO: remove this, it duplicates the code in the ReadThrough view
196 readthrough = get_object_or_404(models.ReadThrough, id=request.POST.get("id"))
197
198 readthrough.start_date = load_date_in_user_tz_as_utc(
199 request.POST.get("start_date"), request.user
200 )
201 readthrough.finish_date = load_date_in_user_tz_as_utc(
202 request.POST.get("finish_date"), request.user
203 )
204
205 progress = request.POST.get("progress")
206 try:
207 progress = int(progress)
208 readthrough.progress = progress
209 except (ValueError, TypeError):
210 pass
211
212 progress_mode = request.POST.get("progress_mode")
213 try:
214 progress_mode = models.ProgressMode(progress_mode)
215 readthrough.progress_mode = progress_mode
216 except ValueError:
217 pass
218
219 readthrough.save()
220
221 # record the progress update individually
222 # use default now for date field
223 readthrough.create_update()
224
225 if is_api_request(request):
226 return HttpResponse()
227 return redirect_to_referer(request)
228
229
230 def find_mentions(user, content):
231 """detect @mentions in raw status content"""
232 if not content:
233 return {}
234 # The regex has nested match groups, so the 0th entry has the full (outer) match
235 # And because the strict username starts with @, the username is 1st char onward
236 usernames = [m[0][1:] for m in re.findall(regex.STRICT_USERNAME, content)]
237
238 known_users = (
239 models.User.viewer_aware_objects(user)
240 .filter(Q(username__in=usernames) | Q(localname__in=usernames))
241 .distinct()
242 )
243 # Prepare a lookup based on both username and localname
244 username_dict = {
245 **{f"@{u.username}": u for u in known_users},
246 **{f"@{u.localname}": u for u in known_users.filter(local=True)},
247 }
248
249 # Users not captured here could be blocked or not yet loaded on the server
250 not_found = set(usernames) - set(username_dict.keys())
251 for username in not_found:
252 mention_user = handle_remote_webfinger(username, unknown_only=True)
253 if not mention_user:
254 # this user is blocked or can't be found
255 continue
256 username_dict[f"@{mention_user.username}"] = mention_user
257 username_dict[f"@{mention_user.localname}"] = mention_user
258 return username_dict
259
260
261 def find_or_create_hashtags(content):
262 """detect #hashtags in raw status content
263
264 it stores hashtags case-sensitive, but ensures that an existing
265 hashtag with different case are found and re-used. for example,
266 an existing #BookWyrm hashtag will be found and used even if the
267 status content is using #bookwyrm.
268 """
269 if not content:
270 return {}
271
272 found_hashtags = {t.lower(): t for t in re.findall(regex.HASHTAG, content)}
273 if len(found_hashtags) == 0:
274 return {}
275
276 known_hashtags = {
277 t.name.lower(): t
278 for t in models.Hashtag.objects.filter(
279 Q(name__in=found_hashtags.keys())
280 ).distinct()
281 }
282
283 not_found = found_hashtags.keys() - known_hashtags.keys()
284 for lower_name in not_found:
285 tag_name = found_hashtags[lower_name]
286 mention_hashtag = models.Hashtag(name=tag_name)
287 mention_hashtag.save()
288 known_hashtags[lower_name] = mention_hashtag
289
290 return {found_hashtags[k]: v for k, v in known_hashtags.items()}
291
292
293 def format_links(content):
294 """detect and format links"""
295 validator = URLValidator()
296 formatted_content = ""
297 split_content = re.split(r"(\s+)", content)
298
299 for potential_link in split_content:
300 if not potential_link:
301 continue
302 wrapped = _wrapped(potential_link)
303 if wrapped:
304 wrapper_close = potential_link[-1]
305 formatted_content += potential_link[0]
306 potential_link = potential_link[1:-1]
307
308 try:
309 # raises an error on anything that's not a valid link
310 validator(potential_link)
311
312 # use everything but the scheme in the presentation of the link
313 url = urlparse(potential_link)
314 link = url.netloc + url.path + url.params
315 if url.query != "":
316 link += "?" + url.query
317 if url.fragment != "":
318 link += "#" + url.fragment
319
320 formatted_content += f'<a href="{potential_link}">{link}</a>'
321 except (ValidationError, UnicodeError):
322 formatted_content += potential_link
323
324 if wrapped:
325 formatted_content += wrapper_close
326
327 return formatted_content
328
329
330 def _wrapped(text):
331 """check if a line of text is wrapped"""
332 wrappers = [("(", ")"), ("[", "]"), ("{", "}")]
333 for wrapper in wrappers:
334 if text[0] == wrapper[0] and text[-1] == wrapper[-1]:
335 return True
336 return False
337
338
339 def to_markdown(content):
340 """catch links and convert to markdown"""
341 content = format_links(content)
342 content = markdown(content)
343 # sanitize resulting html
344 return sanitizer.clean(content)
345
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/status.py b/bookwyrm/views/status.py
--- a/bookwyrm/views/status.py
+++ b/bookwyrm/views/status.py
@@ -305,6 +305,11 @@
formatted_content += potential_link[0]
potential_link = potential_link[1:-1]
+ ends_with_punctuation = _ends_with_punctuation(potential_link)
+ if ends_with_punctuation:
+ punctuation_glyph = potential_link[-1]
+ potential_link = potential_link[0:-1]
+
try:
# raises an error on anything that's not a valid link
validator(potential_link)
@@ -324,6 +329,9 @@
if wrapped:
formatted_content += wrapper_close
+ if ends_with_punctuation:
+ formatted_content += punctuation_glyph
+
return formatted_content
@@ -336,6 +344,15 @@
return False
+def _ends_with_punctuation(text):
+ """check if a line of text ends with a punctuation glyph"""
+ glyphs = [".", ",", ";", ":", "!", "?", "”", "’", '"', "»"]
+ for glyph in glyphs:
+ if text[-1] == glyph:
+ return True
+ return False
+
+
def to_markdown(content):
"""catch links and convert to markdown"""
content = format_links(content)
| {"golden_diff": "diff --git a/bookwyrm/views/status.py b/bookwyrm/views/status.py\n--- a/bookwyrm/views/status.py\n+++ b/bookwyrm/views/status.py\n@@ -305,6 +305,11 @@\n formatted_content += potential_link[0]\n potential_link = potential_link[1:-1]\n \n+ ends_with_punctuation = _ends_with_punctuation(potential_link)\n+ if ends_with_punctuation:\n+ punctuation_glyph = potential_link[-1]\n+ potential_link = potential_link[0:-1]\n+\n try:\n # raises an error on anything that's not a valid link\n validator(potential_link)\n@@ -324,6 +329,9 @@\n if wrapped:\n formatted_content += wrapper_close\n \n+ if ends_with_punctuation:\n+ formatted_content += punctuation_glyph\n+\n return formatted_content\n \n \n@@ -336,6 +344,15 @@\n return False\n \n \n+def _ends_with_punctuation(text):\n+ \"\"\"check if a line of text ends with a punctuation glyph\"\"\"\n+ glyphs = [\".\", \",\", \";\", \":\", \"!\", \"?\", \"\u201d\", \"\u2019\", '\"', \"\u00bb\"]\n+ for glyph in glyphs:\n+ if text[-1] == glyph:\n+ return True\n+ return False\n+\n+\n def to_markdown(content):\n \"\"\"catch links and convert to markdown\"\"\"\n content = format_links(content)\n", "issue": "Malformed Links in Profiles and Posts\n**Describe the bug**\r\nWhen you write a URL at the end of a sentence in your profile or in a post, the trailing dot (.) is appended to the generated HTML anchor.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to your profile\r\n2. Click on 'edit profile'\r\n3. Insert \"https://github.com/bookwyrm-social/bookwyrm/issues.\" (with the dot at the end)\r\n4. Click on \"save\"\r\n5. Follow the Link\r\n\r\n**Example**\r\nhttps://bookwyrm.social/user/taarion\r\n\r\n**Expected behavior**\r\nThe Link shoud be without the dot\r\n\r\n**Instance**\r\nbookwyrm.social i.E.\r\n\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Windows 11\r\n - Browser Vivaldi\r\n - Version 6.1\r\n\r\n\n", "before_files": [{"content": "\"\"\" what are we here for if not for posting \"\"\"\nimport re\nimport logging\nfrom urllib.parse import urlparse\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.validators import URLValidator\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseBadRequest, Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\n\nfrom markdown import markdown\nfrom bookwyrm import forms, models\nfrom bookwyrm.utils import regex, sanitizer\nfrom .helpers import handle_remote_webfinger, is_api_request\nfrom .helpers import load_date_in_user_tz_as_utc, redirect_to_referer\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass EditStatus(View):\n \"\"\"the view for *posting*\"\"\"\n\n def get(self, request, status_id): # pylint: disable=unused-argument\n \"\"\"load the edit panel\"\"\"\n status = get_object_or_404(\n models.Status.objects.select_subclasses(), id=status_id\n )\n\n status_type = \"reply\" if status.reply_parent else status.status_type.lower()\n data = {\n \"type\": status_type,\n \"book\": getattr(status, \"book\", None),\n \"draft\": status,\n }\n return TemplateResponse(request, \"compose.html\", data)\n\n\n# pylint: disable= 
no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass CreateStatus(View):\n \"\"\"the view for *posting*\"\"\"\n\n def get(self, request, status_type): # pylint: disable=unused-argument\n \"\"\"compose view (...not used?)\"\"\"\n book = get_object_or_404(models.Edition, id=request.GET.get(\"book\"))\n data = {\"book\": book}\n return TemplateResponse(request, \"compose.html\", data)\n\n # pylint: disable=too-many-branches\n def post(self, request, status_type, existing_status_id=None):\n \"\"\"create status of whatever type\"\"\"\n created = not existing_status_id\n existing_status = None\n if existing_status_id:\n existing_status = get_object_or_404(\n models.Status.objects.select_subclasses(), id=existing_status_id\n )\n existing_status.edited_date = timezone.now()\n\n status_type = status_type[0].upper() + status_type[1:]\n\n try:\n form = getattr(forms, f\"{status_type}Form\")(\n request.POST, instance=existing_status\n )\n except AttributeError as err:\n logger.exception(err)\n return HttpResponseBadRequest()\n\n if not form.is_valid():\n if is_api_request(request):\n logger.exception(form.errors)\n return HttpResponseBadRequest()\n return redirect_to_referer(request)\n\n status = form.save(request, commit=False)\n status.ready = False\n # save the plain, unformatted version of the status for future editing\n status.raw_content = status.content\n if hasattr(status, \"quote\"):\n status.raw_quote = status.quote\n\n status.sensitive = status.content_warning not in [None, \"\"]\n # the status has to be saved now before we can add many to many fields\n # like mentions\n status.save(broadcast=False)\n\n # inspect the text for user tags\n content = status.content\n mentions = find_mentions(request.user, content)\n for (_, mention_user) in mentions.items():\n # add them to status mentions fk\n status.mention_users.add(mention_user)\n content = format_mentions(content, mentions)\n\n # add reply parent to mentions\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n\n # inspect the text for hashtags\n hashtags = find_or_create_hashtags(content)\n for (_, mention_hashtag) in hashtags.items():\n # add them to status mentions fk\n status.mention_hashtags.add(mention_hashtag)\n content = format_hashtags(content, hashtags)\n\n # deduplicate mentions\n status.mention_users.set(set(status.mention_users.all()))\n\n # don't apply formatting to generated notes\n if not isinstance(status, models.GeneratedNote) and content:\n status.content = to_markdown(content)\n # do apply formatting to quotes\n if hasattr(status, \"quote\"):\n status.quote = to_markdown(status.quote)\n\n status.ready = True\n status.save(created=created)\n\n # update a readthrough, if needed\n if bool(request.POST.get(\"id\")):\n try:\n edit_readthrough(request)\n except Http404:\n pass\n\n if is_api_request(request):\n return HttpResponse()\n return redirect_to_referer(request)\n\n\ndef format_mentions(content, mentions):\n \"\"\"Detect @mentions and make them links\"\"\"\n for (mention_text, mention_user) in mentions.items():\n # turn the mention into a link\n content = re.sub(\n rf\"(?<!/)\\B{mention_text}\\b(?!@)\",\n rf'<a href=\"{mention_user.remote_id}\">{mention_text}</a>',\n content,\n )\n return content\n\n\ndef format_hashtags(content, hashtags):\n \"\"\"Detect #hashtags and make them links\"\"\"\n for (mention_text, mention_hashtag) in hashtags.items():\n # turn the mention into a link\n content = re.sub(\n rf\"(?<!/)\\B{mention_text}\\b(?!@)\",\n rf'<a 
href=\"{mention_hashtag.remote_id}\" data-mention=\"hashtag\">'\n + rf\"{mention_text}</a>\",\n content,\n )\n return content\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass DeleteStatus(View):\n \"\"\"tombstone that bad boy\"\"\"\n\n def post(self, request, status_id):\n \"\"\"delete and tombstone a status\"\"\"\n status = get_object_or_404(models.Status, id=status_id)\n\n # don't let people delete other people's statuses\n status.raise_not_deletable(request.user)\n\n # perform deletion\n status.delete()\n return redirect(\"/\")\n\n\n@login_required\n@require_POST\ndef update_progress(request, book_id): # pylint: disable=unused-argument\n \"\"\"Either it's just a progress update, or it's a comment with a progress update\"\"\"\n if request.POST.get(\"post-status\"):\n return CreateStatus.as_view()(request, \"comment\")\n return edit_readthrough(request)\n\n\n@login_required\n@require_POST\ndef edit_readthrough(request):\n \"\"\"can't use the form because the dates are too finnicky\"\"\"\n # TODO: remove this, it duplicates the code in the ReadThrough view\n readthrough = get_object_or_404(models.ReadThrough, id=request.POST.get(\"id\"))\n\n readthrough.start_date = load_date_in_user_tz_as_utc(\n request.POST.get(\"start_date\"), request.user\n )\n readthrough.finish_date = load_date_in_user_tz_as_utc(\n request.POST.get(\"finish_date\"), request.user\n )\n\n progress = request.POST.get(\"progress\")\n try:\n progress = int(progress)\n readthrough.progress = progress\n except (ValueError, TypeError):\n pass\n\n progress_mode = request.POST.get(\"progress_mode\")\n try:\n progress_mode = models.ProgressMode(progress_mode)\n readthrough.progress_mode = progress_mode\n except ValueError:\n pass\n\n readthrough.save()\n\n # record the progress update individually\n # use default now for date field\n readthrough.create_update()\n\n if is_api_request(request):\n return HttpResponse()\n return redirect_to_referer(request)\n\n\ndef find_mentions(user, content):\n \"\"\"detect @mentions in raw status content\"\"\"\n if not content:\n return {}\n # The regex has nested match groups, so the 0th entry has the full (outer) match\n # And because the strict username starts with @, the username is 1st char onward\n usernames = [m[0][1:] for m in re.findall(regex.STRICT_USERNAME, content)]\n\n known_users = (\n models.User.viewer_aware_objects(user)\n .filter(Q(username__in=usernames) | Q(localname__in=usernames))\n .distinct()\n )\n # Prepare a lookup based on both username and localname\n username_dict = {\n **{f\"@{u.username}\": u for u in known_users},\n **{f\"@{u.localname}\": u for u in known_users.filter(local=True)},\n }\n\n # Users not captured here could be blocked or not yet loaded on the server\n not_found = set(usernames) - set(username_dict.keys())\n for username in not_found:\n mention_user = handle_remote_webfinger(username, unknown_only=True)\n if not mention_user:\n # this user is blocked or can't be found\n continue\n username_dict[f\"@{mention_user.username}\"] = mention_user\n username_dict[f\"@{mention_user.localname}\"] = mention_user\n return username_dict\n\n\ndef find_or_create_hashtags(content):\n \"\"\"detect #hashtags in raw status content\n\n it stores hashtags case-sensitive, but ensures that an existing\n hashtag with different case are found and re-used. 
for example,\n an existing #BookWyrm hashtag will be found and used even if the\n status content is using #bookwyrm.\n \"\"\"\n if not content:\n return {}\n\n found_hashtags = {t.lower(): t for t in re.findall(regex.HASHTAG, content)}\n if len(found_hashtags) == 0:\n return {}\n\n known_hashtags = {\n t.name.lower(): t\n for t in models.Hashtag.objects.filter(\n Q(name__in=found_hashtags.keys())\n ).distinct()\n }\n\n not_found = found_hashtags.keys() - known_hashtags.keys()\n for lower_name in not_found:\n tag_name = found_hashtags[lower_name]\n mention_hashtag = models.Hashtag(name=tag_name)\n mention_hashtag.save()\n known_hashtags[lower_name] = mention_hashtag\n\n return {found_hashtags[k]: v for k, v in known_hashtags.items()}\n\n\ndef format_links(content):\n \"\"\"detect and format links\"\"\"\n validator = URLValidator()\n formatted_content = \"\"\n split_content = re.split(r\"(\\s+)\", content)\n\n for potential_link in split_content:\n if not potential_link:\n continue\n wrapped = _wrapped(potential_link)\n if wrapped:\n wrapper_close = potential_link[-1]\n formatted_content += potential_link[0]\n potential_link = potential_link[1:-1]\n\n try:\n # raises an error on anything that's not a valid link\n validator(potential_link)\n\n # use everything but the scheme in the presentation of the link\n url = urlparse(potential_link)\n link = url.netloc + url.path + url.params\n if url.query != \"\":\n link += \"?\" + url.query\n if url.fragment != \"\":\n link += \"#\" + url.fragment\n\n formatted_content += f'<a href=\"{potential_link}\">{link}</a>'\n except (ValidationError, UnicodeError):\n formatted_content += potential_link\n\n if wrapped:\n formatted_content += wrapper_close\n\n return formatted_content\n\n\ndef _wrapped(text):\n \"\"\"check if a line of text is wrapped\"\"\"\n wrappers = [(\"(\", \")\"), (\"[\", \"]\"), (\"{\", \"}\")]\n for wrapper in wrappers:\n if text[0] == wrapper[0] and text[-1] == wrapper[-1]:\n return True\n return False\n\n\ndef to_markdown(content):\n \"\"\"catch links and convert to markdown\"\"\"\n content = format_links(content)\n content = markdown(content)\n # sanitize resulting html\n return sanitizer.clean(content)\n", "path": "bookwyrm/views/status.py"}], "after_files": [{"content": "\"\"\" what are we here for if not for posting \"\"\"\nimport re\nimport logging\nfrom urllib.parse import urlparse\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.validators import URLValidator\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.http import HttpResponse, HttpResponseBadRequest, Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\n\nfrom markdown import markdown\nfrom bookwyrm import forms, models\nfrom bookwyrm.utils import regex, sanitizer\nfrom .helpers import handle_remote_webfinger, is_api_request\nfrom .helpers import load_date_in_user_tz_as_utc, redirect_to_referer\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass EditStatus(View):\n \"\"\"the view for *posting*\"\"\"\n\n def get(self, request, status_id): # pylint: disable=unused-argument\n \"\"\"load the edit panel\"\"\"\n status = get_object_or_404(\n 
models.Status.objects.select_subclasses(), id=status_id\n )\n\n status_type = \"reply\" if status.reply_parent else status.status_type.lower()\n data = {\n \"type\": status_type,\n \"book\": getattr(status, \"book\", None),\n \"draft\": status,\n }\n return TemplateResponse(request, \"compose.html\", data)\n\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass CreateStatus(View):\n \"\"\"the view for *posting*\"\"\"\n\n def get(self, request, status_type): # pylint: disable=unused-argument\n \"\"\"compose view (...not used?)\"\"\"\n book = get_object_or_404(models.Edition, id=request.GET.get(\"book\"))\n data = {\"book\": book}\n return TemplateResponse(request, \"compose.html\", data)\n\n # pylint: disable=too-many-branches\n def post(self, request, status_type, existing_status_id=None):\n \"\"\"create status of whatever type\"\"\"\n created = not existing_status_id\n existing_status = None\n if existing_status_id:\n existing_status = get_object_or_404(\n models.Status.objects.select_subclasses(), id=existing_status_id\n )\n existing_status.edited_date = timezone.now()\n\n status_type = status_type[0].upper() + status_type[1:]\n\n try:\n form = getattr(forms, f\"{status_type}Form\")(\n request.POST, instance=existing_status\n )\n except AttributeError as err:\n logger.exception(err)\n return HttpResponseBadRequest()\n\n if not form.is_valid():\n if is_api_request(request):\n logger.exception(form.errors)\n return HttpResponseBadRequest()\n return redirect_to_referer(request)\n\n status = form.save(request, commit=False)\n status.ready = False\n # save the plain, unformatted version of the status for future editing\n status.raw_content = status.content\n if hasattr(status, \"quote\"):\n status.raw_quote = status.quote\n\n status.sensitive = status.content_warning not in [None, \"\"]\n # the status has to be saved now before we can add many to many fields\n # like mentions\n status.save(broadcast=False)\n\n # inspect the text for user tags\n content = status.content\n mentions = find_mentions(request.user, content)\n for (_, mention_user) in mentions.items():\n # add them to status mentions fk\n status.mention_users.add(mention_user)\n content = format_mentions(content, mentions)\n\n # add reply parent to mentions\n if status.reply_parent:\n status.mention_users.add(status.reply_parent.user)\n\n # inspect the text for hashtags\n hashtags = find_or_create_hashtags(content)\n for (_, mention_hashtag) in hashtags.items():\n # add them to status mentions fk\n status.mention_hashtags.add(mention_hashtag)\n content = format_hashtags(content, hashtags)\n\n # deduplicate mentions\n status.mention_users.set(set(status.mention_users.all()))\n\n # don't apply formatting to generated notes\n if not isinstance(status, models.GeneratedNote) and content:\n status.content = to_markdown(content)\n # do apply formatting to quotes\n if hasattr(status, \"quote\"):\n status.quote = to_markdown(status.quote)\n\n status.ready = True\n status.save(created=created)\n\n # update a readthrough, if needed\n if bool(request.POST.get(\"id\")):\n try:\n edit_readthrough(request)\n except Http404:\n pass\n\n if is_api_request(request):\n return HttpResponse()\n return redirect_to_referer(request)\n\n\ndef format_mentions(content, mentions):\n \"\"\"Detect @mentions and make them links\"\"\"\n for (mention_text, mention_user) in mentions.items():\n # turn the mention into a link\n content = re.sub(\n rf\"(?<!/)\\B{mention_text}\\b(?!@)\",\n rf'<a 
href=\"{mention_user.remote_id}\">{mention_text}</a>',\n content,\n )\n return content\n\n\ndef format_hashtags(content, hashtags):\n \"\"\"Detect #hashtags and make them links\"\"\"\n for (mention_text, mention_hashtag) in hashtags.items():\n # turn the mention into a link\n content = re.sub(\n rf\"(?<!/)\\B{mention_text}\\b(?!@)\",\n rf'<a href=\"{mention_hashtag.remote_id}\" data-mention=\"hashtag\">'\n + rf\"{mention_text}</a>\",\n content,\n )\n return content\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass DeleteStatus(View):\n \"\"\"tombstone that bad boy\"\"\"\n\n def post(self, request, status_id):\n \"\"\"delete and tombstone a status\"\"\"\n status = get_object_or_404(models.Status, id=status_id)\n\n # don't let people delete other people's statuses\n status.raise_not_deletable(request.user)\n\n # perform deletion\n status.delete()\n return redirect(\"/\")\n\n\n@login_required\n@require_POST\ndef update_progress(request, book_id): # pylint: disable=unused-argument\n \"\"\"Either it's just a progress update, or it's a comment with a progress update\"\"\"\n if request.POST.get(\"post-status\"):\n return CreateStatus.as_view()(request, \"comment\")\n return edit_readthrough(request)\n\n\n@login_required\n@require_POST\ndef edit_readthrough(request):\n \"\"\"can't use the form because the dates are too finnicky\"\"\"\n # TODO: remove this, it duplicates the code in the ReadThrough view\n readthrough = get_object_or_404(models.ReadThrough, id=request.POST.get(\"id\"))\n\n readthrough.start_date = load_date_in_user_tz_as_utc(\n request.POST.get(\"start_date\"), request.user\n )\n readthrough.finish_date = load_date_in_user_tz_as_utc(\n request.POST.get(\"finish_date\"), request.user\n )\n\n progress = request.POST.get(\"progress\")\n try:\n progress = int(progress)\n readthrough.progress = progress\n except (ValueError, TypeError):\n pass\n\n progress_mode = request.POST.get(\"progress_mode\")\n try:\n progress_mode = models.ProgressMode(progress_mode)\n readthrough.progress_mode = progress_mode\n except ValueError:\n pass\n\n readthrough.save()\n\n # record the progress update individually\n # use default now for date field\n readthrough.create_update()\n\n if is_api_request(request):\n return HttpResponse()\n return redirect_to_referer(request)\n\n\ndef find_mentions(user, content):\n \"\"\"detect @mentions in raw status content\"\"\"\n if not content:\n return {}\n # The regex has nested match groups, so the 0th entry has the full (outer) match\n # And because the strict username starts with @, the username is 1st char onward\n usernames = [m[0][1:] for m in re.findall(regex.STRICT_USERNAME, content)]\n\n known_users = (\n models.User.viewer_aware_objects(user)\n .filter(Q(username__in=usernames) | Q(localname__in=usernames))\n .distinct()\n )\n # Prepare a lookup based on both username and localname\n username_dict = {\n **{f\"@{u.username}\": u for u in known_users},\n **{f\"@{u.localname}\": u for u in known_users.filter(local=True)},\n }\n\n # Users not captured here could be blocked or not yet loaded on the server\n not_found = set(usernames) - set(username_dict.keys())\n for username in not_found:\n mention_user = handle_remote_webfinger(username, unknown_only=True)\n if not mention_user:\n # this user is blocked or can't be found\n continue\n username_dict[f\"@{mention_user.username}\"] = mention_user\n username_dict[f\"@{mention_user.localname}\"] = mention_user\n return username_dict\n\n\ndef find_or_create_hashtags(content):\n \"\"\"detect #hashtags in 
raw status content\n\n it stores hashtags case-sensitive, but ensures that an existing\n hashtag with different case are found and re-used. for example,\n an existing #BookWyrm hashtag will be found and used even if the\n status content is using #bookwyrm.\n \"\"\"\n if not content:\n return {}\n\n found_hashtags = {t.lower(): t for t in re.findall(regex.HASHTAG, content)}\n if len(found_hashtags) == 0:\n return {}\n\n known_hashtags = {\n t.name.lower(): t\n for t in models.Hashtag.objects.filter(\n Q(name__in=found_hashtags.keys())\n ).distinct()\n }\n\n not_found = found_hashtags.keys() - known_hashtags.keys()\n for lower_name in not_found:\n tag_name = found_hashtags[lower_name]\n mention_hashtag = models.Hashtag(name=tag_name)\n mention_hashtag.save()\n known_hashtags[lower_name] = mention_hashtag\n\n return {found_hashtags[k]: v for k, v in known_hashtags.items()}\n\n\ndef format_links(content):\n \"\"\"detect and format links\"\"\"\n validator = URLValidator()\n formatted_content = \"\"\n split_content = re.split(r\"(\\s+)\", content)\n\n for potential_link in split_content:\n if not potential_link:\n continue\n wrapped = _wrapped(potential_link)\n if wrapped:\n wrapper_close = potential_link[-1]\n formatted_content += potential_link[0]\n potential_link = potential_link[1:-1]\n\n ends_with_punctuation = _ends_with_punctuation(potential_link)\n if ends_with_punctuation:\n punctuation_glyph = potential_link[-1]\n potential_link = potential_link[0:-1]\n\n try:\n # raises an error on anything that's not a valid link\n validator(potential_link)\n\n # use everything but the scheme in the presentation of the link\n url = urlparse(potential_link)\n link = url.netloc + url.path + url.params\n if url.query != \"\":\n link += \"?\" + url.query\n if url.fragment != \"\":\n link += \"#\" + url.fragment\n\n formatted_content += f'<a href=\"{potential_link}\">{link}</a>'\n except (ValidationError, UnicodeError):\n formatted_content += potential_link\n\n if wrapped:\n formatted_content += wrapper_close\n\n if ends_with_punctuation:\n formatted_content += punctuation_glyph\n\n return formatted_content\n\n\ndef _wrapped(text):\n \"\"\"check if a line of text is wrapped\"\"\"\n wrappers = [(\"(\", \")\"), (\"[\", \"]\"), (\"{\", \"}\")]\n for wrapper in wrappers:\n if text[0] == wrapper[0] and text[-1] == wrapper[-1]:\n return True\n return False\n\n\ndef _ends_with_punctuation(text):\n \"\"\"check if a line of text ends with a punctuation glyph\"\"\"\n glyphs = [\".\", \",\", \";\", \":\", \"!\", \"?\", \"\u201d\", \"\u2019\", '\"', \"\u00bb\"]\n for glyph in glyphs:\n if text[-1] == glyph:\n return True\n return False\n\n\ndef to_markdown(content):\n \"\"\"catch links and convert to markdown\"\"\"\n content = format_links(content)\n content = markdown(content)\n # sanitize resulting html\n return sanitizer.clean(content)\n", "path": "bookwyrm/views/status.py"}]} | 3,938 | 314 |
gh_patches_debug_37188 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-159 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make TensorBoardWriter no-op if tensorboard is not available
--- END ISSUE ---
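The request boils down to the usual optional-dependency guard: attempt the tensorboard import once, remember whether it succeeded, and have the writer quietly skip its work (ideally after a single warning) when it did not. Below is a minimal sketch of that pattern under the assumption that the writer wraps `torch.utils.tensorboard.SummaryWriter`; the class and method names are illustrative and do not reproduce the library's real `TensorBoardWriter` interface.
```python
import warnings

try:
    # Importing this raises ImportError when the tensorboard package is missing.
    from torch.utils.tensorboard import SummaryWriter
    _TENSORBOARD_AVAILABLE = True
except ImportError:
    SummaryWriter = None
    _TENSORBOARD_AVAILABLE = False


class OptionalTensorBoardWriter:
    """Forwards scalars to tensorboard when it is installed, otherwise does nothing."""

    def __init__(self, log_dir: str = "runs") -> None:
        if _TENSORBOARD_AVAILABLE:
            self._writer = SummaryWriter(log_dir=log_dir)
        else:
            warnings.warn(
                "tensorboard is not installed; TensorBoard output will be skipped")
            self._writer = None

    def add_scalar(self, tag: str, value: float, step: int) -> None:
        if self._writer is not None:
            self._writer.add_scalar(tag, value, step)

    def close(self) -> None:
        if self._writer is not None:
            self._writer.close()
```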
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `example/mnist.py`
Content:
```
1 import argparse
2 import torch
3 import torch.nn as nn
4 import torch.nn.functional as F
5 import torch.optim as optim
6 from torchvision import datasets, transforms
7
8 import pytorch_pfn_extras as ppe
9 import pytorch_pfn_extras.training.extensions as extensions
10
11
12 class Net(nn.Module):
13 def __init__(self):
14 super().__init__()
15 self.conv1 = nn.Conv2d(1, 20, 5, 1)
16 self.conv2 = nn.Conv2d(20, 50, 5, 1)
17 self.fc1 = nn.Linear(4 * 4 * 50, 500)
18 self.fc2 = nn.Linear(500, 10)
19
20 def forward(self, x):
21 x = F.relu(self.conv1(x))
22 x = F.max_pool2d(x, 2, 2)
23 x = F.relu(self.conv2(x))
24 x = F.max_pool2d(x, 2, 2)
25 x = x.flatten(start_dim=1)
26 x = F.relu(self.fc1(x))
27 x = self.fc2(x)
28 ppe.nn.ensure(x, shape=(None, 10))
29 return F.log_softmax(x, dim=1)
30
31
32 def train(manager, args, model, device, train_loader):
33 while not manager.stop_trigger:
34 model.train()
35 for _, (data, target) in enumerate(train_loader):
36 with manager.run_iteration(step_optimizers=['main']):
37 data, target = data.to(device), target.to(device)
38 output = model(data)
39 loss = F.nll_loss(output, target)
40 ppe.reporting.report({'train/loss': loss.item()})
41 loss.backward()
42
43
44 def test(args, model, device, data, target):
45 """ The extension loops over the iterator in order to
46 drive the evaluator progress bar and reporting
47 averages
48 """
49 model.eval()
50 data, target = data.to(device), target.to(device)
51 output = model(data)
52 # Final result will be average of averages of the same size
53 test_loss = F.nll_loss(output, target, reduction='mean').item()
54 ppe.reporting.report({'val/loss': test_loss})
55 pred = output.argmax(dim=1, keepdim=True)
56
57 correct = pred.eq(target.view_as(pred)).sum().item()
58 ppe.reporting.report({'val/acc': correct / len(data)})
59
60
61 def main():
62 # Training settings
63 parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
64 parser.add_argument('--batch-size', type=int, default=64, metavar='N',
65 help='input batch size for training (default: 64)')
66 parser.add_argument('--test-batch-size', type=int, default=1000,
67 metavar='N',
68 help='input batch size for testing (default: 1000)')
69 parser.add_argument('--epochs', type=int, default=10, metavar='N',
70 help='number of epochs to train (default: 10)')
71 parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
72 help='learning rate (default: 0.01)')
73 parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
74 help='SGD momentum (default: 0.5)')
75 parser.add_argument('--no-cuda', dest='cuda',
76 action='store_false', default=True,
77 help='disables CUDA training')
78 parser.add_argument('--seed', type=int, default=1, metavar='S',
79 help='random seed (default: 1)')
80 parser.add_argument('--save-model', action='store_true', default=False,
81 help='For Saving the current Model')
82 parser.add_argument('--snapshot', type=str, default=None,
83 help='path to snapshot file')
84 args = parser.parse_args()
85 use_cuda = args.cuda and torch.cuda.is_available()
86
87 torch.manual_seed(args.seed)
88
89 device = torch.device("cuda" if use_cuda else "cpu")
90
91 kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
92 train_loader = torch.utils.data.DataLoader(
93 datasets.MNIST('../data', train=True, download=True,
94 transform=transforms.Compose([
95 transforms.ToTensor(),
96 transforms.Normalize((0.1307,), (0.3081,))
97 ])),
98 batch_size=args.batch_size, shuffle=True,
99 **kwargs) # type: ignore[arg-type]
100 test_loader = torch.utils.data.DataLoader(
101 datasets.MNIST('../data', train=False, transform=transforms.Compose([
102 transforms.ToTensor(),
103 transforms.Normalize((0.1307,), (0.3081,))
104 ])),
105 batch_size=args.test_batch_size, shuffle=True,
106 **kwargs) # type: ignore[arg-type]
107
108 model = Net()
109 model.to(device)
110
111 optimizer = optim.SGD(
112 model.parameters(), lr=args.lr, momentum=args.momentum)
113
114 # manager.extend(...) also works
115 my_extensions = [
116 extensions.LogReport(),
117 extensions.ProgressBar(),
118 extensions.observe_lr(optimizer=optimizer),
119 extensions.ParameterStatistics(model, prefix='model'),
120 extensions.VariableStatisticsPlot(model),
121 extensions.Evaluator(
122 test_loader, model,
123 eval_func=lambda data, target:
124 test(args, model, device, data, target),
125 progress_bar=True),
126 extensions.PlotReport(
127 ['train/loss', 'val/loss'], 'epoch', filename='loss.png'),
128 extensions.PrintReport(['epoch', 'iteration',
129 'train/loss', 'lr', 'model/fc2.bias/grad/min',
130 'val/loss', 'val/acc']),
131 extensions.snapshot(),
132 ]
133 # Custom stop triggers can be added to the manager and
134 # their status accessed through `manager.stop_trigger`
135 trigger = None
136 # trigger = ppe.training.triggers.EarlyStoppingTrigger(
137 # check_trigger=(1, 'epoch'), monitor='val/loss')
138 manager = ppe.training.ExtensionsManager(
139 model, optimizer, args.epochs,
140 extensions=my_extensions,
141 iters_per_epoch=len(train_loader),
142 stop_trigger=trigger)
143 # Lets load the snapshot
144 if args.snapshot is not None:
145 state = torch.load(args.snapshot)
146 manager.load_state_dict(state)
147 train(manager, args, model, device, train_loader)
148 # Test function is called from the evaluator extension
149 # to get access to the reporter and other facilities
150 # test(args, model, device, test_loader)
151
152 if (args.save_model):
153 torch.save(model.state_dict(), "mnist_cnn.pt")
154
155
156 if __name__ == '__main__':
157 main()
158
```
Path: `pytorch_pfn_extras/writing/_tensorboard_writer.py`
Content:
```
1 from typing import Any, KeysView, Optional
2
3 from pytorch_pfn_extras.writing._writer_base import (
4 _TargetType, _SaveFun, _FileSystem
5 )
6
7
8 class TensorBoardWriter(object):
9 """ Writer that sends statistics to TensorBoard.
10
11 This class contains a `torch.utils.tensorboard.SummaryWriter`
12 object that is used to send the collected statistics to TensorBoard.
13 A list of stats can be specified to report only the desired ones.
14
15 Args:
16 savefun: Ignored.
17 fs: Ignored.
18 out_dir: Passed as ``log_dir`` argument to SummaryWriter.
19 stats (list): List of statistic keys.
20 kwds: Passed as an additional arguments to SummaryWriter.
21 """
22 def __init__(
23 self,
24 savefun: Optional[_SaveFun] = None,
25 fs: _FileSystem = None,
26 out_dir: Optional[str] = None,
27 stats: Optional[KeysView[str]] = None,
28 **kwds: Any
29 ) -> None:
30 from torch.utils.tensorboard import SummaryWriter
31 self._stats = stats
32 self._writer: Optional[SummaryWriter] = (
33 SummaryWriter( # type: ignore[no-untyped-call]
34 log_dir=out_dir, **kwds))
35
36 def __del__(self) -> None:
37 self.finalize()
38
39 def __call__(
40 self,
41 filename: str,
42 out_dir: str,
43 target: _TargetType,
44 *,
45 savefun: Optional[_SaveFun] = None,
46 append: bool = False,
47 ) -> None:
48 """Sends the statistics to the TensorBoard.
49
50 Args:
51 filename: Ignored.
52 out_dir: Ignored.
53 target (dict or list): The statistics of the iteration. If given as
54 a list, only the last element (assumed to be a dict containing
55 the latest iteration statistics) is reported.
56 savefun: Ignored.
57 append: Ignored.
58 """
59 if self._writer is None:
60 raise RuntimeError('TensorBoardWriter already finalized')
61
62 stats_cpu = target
63 if isinstance(target, list):
64 stats_cpu = target[-1]
65
66 if not isinstance(stats_cpu, dict):
67 raise TypeError('target must be dict or list of dicts')
68 keys = stats_cpu.keys()
69 if self._stats is not None:
70 keys = self._stats
71 for key in keys:
72 value = stats_cpu[key]
73 self._writer.add_scalar( # type: ignore[no-untyped-call]
74 key, value, stats_cpu['iteration'])
75
76 def finalize(self) -> None:
77 writer = self._writer
78 if writer is not None:
79 writer.close() # type: ignore[no-untyped-call]
80 self._writer = None
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/example/mnist.py b/example/mnist.py
--- a/example/mnist.py
+++ b/example/mnist.py
@@ -114,6 +114,12 @@
# manager.extend(...) also works
my_extensions = [
extensions.LogReport(),
+
+ # Enables TensorBoard support.
+ # Run `tensorboard --logdir runs` to launch the TensorBoard.
+ extensions.LogReport(
+ writer=ppe.writing.TensorBoardWriter(out_dir='runs'),
+ trigger=(1, 'iteration')),
extensions.ProgressBar(),
extensions.observe_lr(optimizer=optimizer),
extensions.ParameterStatistics(model, prefix='model'),
diff --git a/pytorch_pfn_extras/writing/_tensorboard_writer.py b/pytorch_pfn_extras/writing/_tensorboard_writer.py
--- a/pytorch_pfn_extras/writing/_tensorboard_writer.py
+++ b/pytorch_pfn_extras/writing/_tensorboard_writer.py
@@ -1,4 +1,5 @@
from typing import Any, KeysView, Optional
+import warnings
from pytorch_pfn_extras.writing._writer_base import (
_TargetType, _SaveFun, _FileSystem
@@ -27,10 +28,17 @@
stats: Optional[KeysView[str]] = None,
**kwds: Any
) -> None:
- from torch.utils.tensorboard import SummaryWriter
+ self._writer = None
+ try:
+ import torch.utils.tensorboard
+ except ImportError:
+ warnings.warn(
+ 'tensorboard is unavailable. '
+ 'TensorBoardWriter will do nothing.')
+ return
self._stats = stats
- self._writer: Optional[SummaryWriter] = (
- SummaryWriter( # type: ignore[no-untyped-call]
+ self._writer = (
+ torch.utils.tensorboard.SummaryWriter( # type: ignore[no-untyped-call]
log_dir=out_dir, **kwds))
def __del__(self) -> None:
@@ -57,8 +65,7 @@
append: Ignored.
"""
if self._writer is None:
- raise RuntimeError('TensorBoardWriter already finalized')
-
+ return
stats_cpu = target
if isinstance(target, list):
stats_cpu = target[-1]
@@ -74,7 +81,6 @@
key, value, stats_cpu['iteration'])
def finalize(self) -> None:
- writer = self._writer
- if writer is not None:
- writer.close() # type: ignore[no-untyped-call]
- self._writer = None
+ if self._writer is not None:
+ self._writer.close() # type: ignore[no-untyped-call]
+ self._writer = None
| {"golden_diff": "diff --git a/example/mnist.py b/example/mnist.py\n--- a/example/mnist.py\n+++ b/example/mnist.py\n@@ -114,6 +114,12 @@\n # manager.extend(...) also works\n my_extensions = [\n extensions.LogReport(),\n+\n+ # Enables TensorBoard support.\n+ # Run `tensorboard --logdir runs` to launch the TensorBoard.\n+ extensions.LogReport(\n+ writer=ppe.writing.TensorBoardWriter(out_dir='runs'),\n+ trigger=(1, 'iteration')),\n extensions.ProgressBar(),\n extensions.observe_lr(optimizer=optimizer),\n extensions.ParameterStatistics(model, prefix='model'),\ndiff --git a/pytorch_pfn_extras/writing/_tensorboard_writer.py b/pytorch_pfn_extras/writing/_tensorboard_writer.py\n--- a/pytorch_pfn_extras/writing/_tensorboard_writer.py\n+++ b/pytorch_pfn_extras/writing/_tensorboard_writer.py\n@@ -1,4 +1,5 @@\n from typing import Any, KeysView, Optional\n+import warnings\n \n from pytorch_pfn_extras.writing._writer_base import (\n _TargetType, _SaveFun, _FileSystem\n@@ -27,10 +28,17 @@\n stats: Optional[KeysView[str]] = None,\n **kwds: Any\n ) -> None:\n- from torch.utils.tensorboard import SummaryWriter\n+ self._writer = None\n+ try:\n+ import torch.utils.tensorboard\n+ except ImportError:\n+ warnings.warn(\n+ 'tensorboard is unavailable. '\n+ 'TensorBoardWriter will do nothing.')\n+ return\n self._stats = stats\n- self._writer: Optional[SummaryWriter] = (\n- SummaryWriter( # type: ignore[no-untyped-call]\n+ self._writer = (\n+ torch.utils.tensorboard.SummaryWriter( # type: ignore[no-untyped-call]\n log_dir=out_dir, **kwds))\n \n def __del__(self) -> None:\n@@ -57,8 +65,7 @@\n append: Ignored.\n \"\"\"\n if self._writer is None:\n- raise RuntimeError('TensorBoardWriter already finalized')\n-\n+ return\n stats_cpu = target\n if isinstance(target, list):\n stats_cpu = target[-1]\n@@ -74,7 +81,6 @@\n key, value, stats_cpu['iteration'])\n \n def finalize(self) -> None:\n- writer = self._writer\n- if writer is not None:\n- writer.close() # type: ignore[no-untyped-call]\n- self._writer = None\n+ if self._writer is not None:\n+ self._writer.close() # type: ignore[no-untyped-call]\n+ self._writer = None\n", "issue": "Make TensorBoardWriter no-op if tensorboard is not available\n\n", "before_files": [{"content": "import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nimport pytorch_pfn_extras as ppe\nimport pytorch_pfn_extras.training.extensions as extensions\n\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4 * 4 * 50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.flatten(start_dim=1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n ppe.nn.ensure(x, shape=(None, 10))\n return F.log_softmax(x, dim=1)\n\n\ndef train(manager, args, model, device, train_loader):\n while not manager.stop_trigger:\n model.train()\n for _, (data, target) in enumerate(train_loader):\n with manager.run_iteration(step_optimizers=['main']):\n data, target = data.to(device), target.to(device)\n output = model(data)\n loss = F.nll_loss(output, target)\n ppe.reporting.report({'train/loss': loss.item()})\n loss.backward()\n\n\ndef test(args, model, device, data, target):\n \"\"\" The extension loops over the iterator in order to\n drive the evaluator progress bar and reporting\n 
averages\n \"\"\"\n model.eval()\n data, target = data.to(device), target.to(device)\n output = model(data)\n # Final result will be average of averages of the same size\n test_loss = F.nll_loss(output, target, reduction='mean').item()\n ppe.reporting.report({'val/loss': test_loss})\n pred = output.argmax(dim=1, keepdim=True)\n\n correct = pred.eq(target.view_as(pred)).sum().item()\n ppe.reporting.report({'val/acc': correct / len(data)})\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000,\n metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', dest='cuda',\n action='store_false', default=True,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n parser.add_argument('--snapshot', type=str, default=None,\n help='path to snapshot file')\n args = parser.parse_args()\n use_cuda = args.cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True,\n **kwargs) # type: ignore[arg-type]\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=True,\n **kwargs) # type: ignore[arg-type]\n\n model = Net()\n model.to(device)\n\n optimizer = optim.SGD(\n model.parameters(), lr=args.lr, momentum=args.momentum)\n\n # manager.extend(...) 
also works\n my_extensions = [\n extensions.LogReport(),\n extensions.ProgressBar(),\n extensions.observe_lr(optimizer=optimizer),\n extensions.ParameterStatistics(model, prefix='model'),\n extensions.VariableStatisticsPlot(model),\n extensions.Evaluator(\n test_loader, model,\n eval_func=lambda data, target:\n test(args, model, device, data, target),\n progress_bar=True),\n extensions.PlotReport(\n ['train/loss', 'val/loss'], 'epoch', filename='loss.png'),\n extensions.PrintReport(['epoch', 'iteration',\n 'train/loss', 'lr', 'model/fc2.bias/grad/min',\n 'val/loss', 'val/acc']),\n extensions.snapshot(),\n ]\n # Custom stop triggers can be added to the manager and\n # their status accessed through `manager.stop_trigger`\n trigger = None\n # trigger = ppe.training.triggers.EarlyStoppingTrigger(\n # check_trigger=(1, 'epoch'), monitor='val/loss')\n manager = ppe.training.ExtensionsManager(\n model, optimizer, args.epochs,\n extensions=my_extensions,\n iters_per_epoch=len(train_loader),\n stop_trigger=trigger)\n # Lets load the snapshot\n if args.snapshot is not None:\n state = torch.load(args.snapshot)\n manager.load_state_dict(state)\n train(manager, args, model, device, train_loader)\n # Test function is called from the evaluator extension\n # to get access to the reporter and other facilities\n # test(args, model, device, test_loader)\n\n if (args.save_model):\n torch.save(model.state_dict(), \"mnist_cnn.pt\")\n\n\nif __name__ == '__main__':\n main()\n", "path": "example/mnist.py"}, {"content": "from typing import Any, KeysView, Optional\n\nfrom pytorch_pfn_extras.writing._writer_base import (\n _TargetType, _SaveFun, _FileSystem\n)\n\n\nclass TensorBoardWriter(object):\n \"\"\" Writer that sends statistics to TensorBoard.\n\n This class contains a `torch.utils.tensorboard.SummaryWriter`\n object that is used to send the collected statistics to TensorBoard.\n A list of stats can be specified to report only the desired ones.\n\n Args:\n savefun: Ignored.\n fs: Ignored.\n out_dir: Passed as ``log_dir`` argument to SummaryWriter.\n stats (list): List of statistic keys.\n kwds: Passed as an additional arguments to SummaryWriter.\n \"\"\"\n def __init__(\n self,\n savefun: Optional[_SaveFun] = None,\n fs: _FileSystem = None,\n out_dir: Optional[str] = None,\n stats: Optional[KeysView[str]] = None,\n **kwds: Any\n ) -> None:\n from torch.utils.tensorboard import SummaryWriter\n self._stats = stats\n self._writer: Optional[SummaryWriter] = (\n SummaryWriter( # type: ignore[no-untyped-call]\n log_dir=out_dir, **kwds))\n\n def __del__(self) -> None:\n self.finalize()\n\n def __call__(\n self,\n filename: str,\n out_dir: str,\n target: _TargetType,\n *,\n savefun: Optional[_SaveFun] = None,\n append: bool = False,\n ) -> None:\n \"\"\"Sends the statistics to the TensorBoard.\n\n Args:\n filename: Ignored.\n out_dir: Ignored.\n target (dict or list): The statistics of the iteration. 
If given as\n a list, only the last element (assumed to be a dict containing\n the latest iteration statistics) is reported.\n savefun: Ignored.\n append: Ignored.\n \"\"\"\n if self._writer is None:\n raise RuntimeError('TensorBoardWriter already finalized')\n\n stats_cpu = target\n if isinstance(target, list):\n stats_cpu = target[-1]\n\n if not isinstance(stats_cpu, dict):\n raise TypeError('target must be dict or list of dicts')\n keys = stats_cpu.keys()\n if self._stats is not None:\n keys = self._stats\n for key in keys:\n value = stats_cpu[key]\n self._writer.add_scalar( # type: ignore[no-untyped-call]\n key, value, stats_cpu['iteration'])\n\n def finalize(self) -> None:\n writer = self._writer\n if writer is not None:\n writer.close() # type: ignore[no-untyped-call]\n self._writer = None\n", "path": "pytorch_pfn_extras/writing/_tensorboard_writer.py"}], "after_files": [{"content": "import argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\n\nimport pytorch_pfn_extras as ppe\nimport pytorch_pfn_extras.training.extensions as extensions\n\n\nclass Net(nn.Module):\n def __init__(self):\n super().__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4 * 4 * 50, 500)\n self.fc2 = nn.Linear(500, 10)\n\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.max_pool2d(x, 2, 2)\n x = F.relu(self.conv2(x))\n x = F.max_pool2d(x, 2, 2)\n x = x.flatten(start_dim=1)\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n ppe.nn.ensure(x, shape=(None, 10))\n return F.log_softmax(x, dim=1)\n\n\ndef train(manager, args, model, device, train_loader):\n while not manager.stop_trigger:\n model.train()\n for _, (data, target) in enumerate(train_loader):\n with manager.run_iteration(step_optimizers=['main']):\n data, target = data.to(device), target.to(device)\n output = model(data)\n loss = F.nll_loss(output, target)\n ppe.reporting.report({'train/loss': loss.item()})\n loss.backward()\n\n\ndef test(args, model, device, data, target):\n \"\"\" The extension loops over the iterator in order to\n drive the evaluator progress bar and reporting\n averages\n \"\"\"\n model.eval()\n data, target = data.to(device), target.to(device)\n output = model(data)\n # Final result will be average of averages of the same size\n test_loss = F.nll_loss(output, target, reduction='mean').item()\n ppe.reporting.report({'val/loss': test_loss})\n pred = output.argmax(dim=1, keepdim=True)\n\n correct = pred.eq(target.view_as(pred)).sum().item()\n ppe.reporting.report({'val/acc': correct / len(data)})\n\n\ndef main():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000,\n metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=10, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', dest='cuda',\n action='store_false', default=True,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, 
metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n parser.add_argument('--snapshot', type=str, default=None,\n help='path to snapshot file')\n args = parser.parse_args()\n use_cuda = args.cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True,\n **kwargs) # type: ignore[arg-type]\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=True,\n **kwargs) # type: ignore[arg-type]\n\n model = Net()\n model.to(device)\n\n optimizer = optim.SGD(\n model.parameters(), lr=args.lr, momentum=args.momentum)\n\n # manager.extend(...) also works\n my_extensions = [\n extensions.LogReport(),\n\n # Enables TensorBoard support.\n # Run `tensorboard --logdir runs` to launch the TensorBoard.\n extensions.LogReport(\n writer=ppe.writing.TensorBoardWriter(out_dir='runs'),\n trigger=(1, 'iteration')),\n extensions.ProgressBar(),\n extensions.observe_lr(optimizer=optimizer),\n extensions.ParameterStatistics(model, prefix='model'),\n extensions.VariableStatisticsPlot(model),\n extensions.Evaluator(\n test_loader, model,\n eval_func=lambda data, target:\n test(args, model, device, data, target),\n progress_bar=True),\n extensions.PlotReport(\n ['train/loss', 'val/loss'], 'epoch', filename='loss.png'),\n extensions.PrintReport(['epoch', 'iteration',\n 'train/loss', 'lr', 'model/fc2.bias/grad/min',\n 'val/loss', 'val/acc']),\n extensions.snapshot(),\n ]\n # Custom stop triggers can be added to the manager and\n # their status accessed through `manager.stop_trigger`\n trigger = None\n # trigger = ppe.training.triggers.EarlyStoppingTrigger(\n # check_trigger=(1, 'epoch'), monitor='val/loss')\n manager = ppe.training.ExtensionsManager(\n model, optimizer, args.epochs,\n extensions=my_extensions,\n iters_per_epoch=len(train_loader),\n stop_trigger=trigger)\n # Lets load the snapshot\n if args.snapshot is not None:\n state = torch.load(args.snapshot)\n manager.load_state_dict(state)\n train(manager, args, model, device, train_loader)\n # Test function is called from the evaluator extension\n # to get access to the reporter and other facilities\n # test(args, model, device, test_loader)\n\n if (args.save_model):\n torch.save(model.state_dict(), \"mnist_cnn.pt\")\n\n\nif __name__ == '__main__':\n main()\n", "path": "example/mnist.py"}, {"content": "from typing import Any, KeysView, Optional\nimport warnings\n\nfrom pytorch_pfn_extras.writing._writer_base import (\n _TargetType, _SaveFun, _FileSystem\n)\n\n\nclass TensorBoardWriter(object):\n \"\"\" Writer that sends statistics to TensorBoard.\n\n This class contains a `torch.utils.tensorboard.SummaryWriter`\n object that is used to send the collected statistics to TensorBoard.\n A list of stats can be specified to report only the desired ones.\n\n Args:\n savefun: Ignored.\n fs: Ignored.\n out_dir: Passed as ``log_dir`` argument to SummaryWriter.\n stats (list): List of statistic keys.\n 
kwds: Passed as an additional arguments to SummaryWriter.\n \"\"\"\n def __init__(\n self,\n savefun: Optional[_SaveFun] = None,\n fs: _FileSystem = None,\n out_dir: Optional[str] = None,\n stats: Optional[KeysView[str]] = None,\n **kwds: Any\n ) -> None:\n self._writer = None\n try:\n import torch.utils.tensorboard\n except ImportError:\n warnings.warn(\n 'tensorboard is unavailable. '\n 'TensorBoardWriter will do nothing.')\n return\n self._stats = stats\n self._writer = (\n torch.utils.tensorboard.SummaryWriter( # type: ignore[no-untyped-call]\n log_dir=out_dir, **kwds))\n\n def __del__(self) -> None:\n self.finalize()\n\n def __call__(\n self,\n filename: str,\n out_dir: str,\n target: _TargetType,\n *,\n savefun: Optional[_SaveFun] = None,\n append: bool = False,\n ) -> None:\n \"\"\"Sends the statistics to the TensorBoard.\n\n Args:\n filename: Ignored.\n out_dir: Ignored.\n target (dict or list): The statistics of the iteration. If given as\n a list, only the last element (assumed to be a dict containing\n the latest iteration statistics) is reported.\n savefun: Ignored.\n append: Ignored.\n \"\"\"\n if self._writer is None:\n return\n stats_cpu = target\n if isinstance(target, list):\n stats_cpu = target[-1]\n\n if not isinstance(stats_cpu, dict):\n raise TypeError('target must be dict or list of dicts')\n keys = stats_cpu.keys()\n if self._stats is not None:\n keys = self._stats\n for key in keys:\n value = stats_cpu[key]\n self._writer.add_scalar( # type: ignore[no-untyped-call]\n key, value, stats_cpu['iteration'])\n\n def finalize(self) -> None:\n if self._writer is not None:\n self._writer.close() # type: ignore[no-untyped-call]\n self._writer = None\n", "path": "pytorch_pfn_extras/writing/_tensorboard_writer.py"}]} | 2,863 | 612 |
gh_patches_debug_2938 | rasdani/github-patches | git_diff | Parsl__parsl-613 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TorqueProvider fails on NSCC
The following patch is required in order to run the `TorqueProvider` on NSCC:
```
[nscc04] ~/libsubmit >git diff
diff --git a/libsubmit/providers/torque/template.py b/libsubmit/providers/torque/template.py
index a00ce7c..056c648 100644
--- a/libsubmit/providers/torque/template.py
+++ b/libsubmit/providers/torque/template.py
@@ -8,7 +8,6 @@ template_string = '''#!/bin/bash
#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
#PBS -o ${submit_script_dir}/${jobname}.submit.stdout
#PBS -e ${submit_script_dir}/${jobname}.submit.stderr
-#PBS -v WORKER_LOGGING_LEVEL
${overrides}
export JOBNAME="${jobname}"
```
Otherwise, the job fails with `qsub: cannot send environment with the job`. Could we just merge the patch, or should we make this configurable somehow?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/providers/torque/template.py`
Content:
```
1 template_string = '''#!/bin/bash
2
3 #PBS -S /bin/bash
4 #PBS -N ${jobname}
5 #PBS -m n
6 #PBS -k eo
7 #PBS -l walltime=$walltime
8 #PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
9 #PBS -o ${submit_script_dir}/${jobname}.submit.stdout
10 #PBS -e ${submit_script_dir}/${jobname}.submit.stderr
11 #PBS -v WORKER_LOGGING_LEVEL
12 ${overrides}
13
14 export JOBNAME="${jobname}"
15
16 ${user_script}
17
18 '''
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/providers/torque/template.py b/parsl/providers/torque/template.py
--- a/parsl/providers/torque/template.py
+++ b/parsl/providers/torque/template.py
@@ -8,7 +8,6 @@
#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}
#PBS -o ${submit_script_dir}/${jobname}.submit.stdout
#PBS -e ${submit_script_dir}/${jobname}.submit.stderr
-#PBS -v WORKER_LOGGING_LEVEL
${overrides}
export JOBNAME="${jobname}"
| {"golden_diff": "diff --git a/parsl/providers/torque/template.py b/parsl/providers/torque/template.py\n--- a/parsl/providers/torque/template.py\n+++ b/parsl/providers/torque/template.py\n@@ -8,7 +8,6 @@\n #PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\n #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n-#PBS -v WORKER_LOGGING_LEVEL\n ${overrides}\n \n export JOBNAME=\"${jobname}\"\n", "issue": "TorqueProvider fails on NSCC \nThe following patch is required in order to run the `TorqueProvider` on NSCC:\r\n```\r\n[nscc04] ~/libsubmit >git diff\r\ndiff --git a/libsubmit/providers/torque/template.py b/libsubmit/providers/torque/template.py\r\nindex a00ce7c..056c648 100644\r\n--- a/libsubmit/providers/torque/template.py\r\n+++ b/libsubmit/providers/torque/template.py\r\n@@ -8,7 +8,6 @@ template_string = '''#!/bin/bash\r\n #PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\r\n #PBS -o ${submit_script_dir}/${jobname}.submit.stdout\r\n #PBS -e ${submit_script_dir}/${jobname}.submit.stderr\r\n-#PBS -v WORKER_LOGGING_LEVEL\r\n ${overrides}\r\n\r\n export JOBNAME=\"${jobname}\"\r\n```\r\n\r\nOtherwise, the job fails with `qsub: cannot send environment with the job`. Could we just merge the patch, or should we make this configurable somehow?\n", "before_files": [{"content": "template_string = '''#!/bin/bash\n\n#PBS -S /bin/bash\n#PBS -N ${jobname}\n#PBS -m n\n#PBS -k eo\n#PBS -l walltime=$walltime\n#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n#PBS -v WORKER_LOGGING_LEVEL\n${overrides}\n\nexport JOBNAME=\"${jobname}\"\n\n${user_script}\n\n'''\n", "path": "parsl/providers/torque/template.py"}], "after_files": [{"content": "template_string = '''#!/bin/bash\n\n#PBS -S /bin/bash\n#PBS -N ${jobname}\n#PBS -m n\n#PBS -k eo\n#PBS -l walltime=$walltime\n#PBS -l nodes=${nodes_per_block}:ppn=${tasks_per_node}\n#PBS -o ${submit_script_dir}/${jobname}.submit.stdout\n#PBS -e ${submit_script_dir}/${jobname}.submit.stderr\n${overrides}\n\nexport JOBNAME=\"${jobname}\"\n\n${user_script}\n\n'''\n", "path": "parsl/providers/torque/template.py"}]} | 638 | 123 |
gh_patches_debug_584 | rasdani/github-patches | git_diff | pex-tool__pex-1709 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.77
On the docket:
+ [x] Fix pathologic lock creation slowness. #1707
+ [x] Support uncompressed PEXes. (#1705)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.76"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.76"
+__version__ = "2.1.77"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.76\"\n+__version__ = \"2.1.77\"\n", "issue": "Release 2.1.77\nOn the docket:\r\n+ [x] Fix pathologic lock creation slowness. #1707 \r\n+ [x] Support uncompressed PEXes. (#1705)\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.76\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.77\"\n", "path": "pex/version.py"}]} | 356 | 96 |
gh_patches_debug_38123 | rasdani/github-patches | git_diff | kubeflow__pipelines-5750 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[v2compat] configurable default pipeline root
Quoted from https://github.com/kubeflow/pipelines/issues/4649#issuecomment-844035914
Blocks https://github.com/kubeflow/pipelines/issues/5680
> FYI, KFP v2 compatible mode has been released, see documentation: https://www.kubeflow.org/docs/components/pipelines/sdk/v2/.
>
> It doesn't support artifact repository configuration; this is one of the things we want to support too. So I'm posting early thoughts on this related issue.
>
> Let me first try to summarize requirements for configuring artifact repositories for both KFP v2 compatible and v2.
>
> ## Object store specific credentials requirements
> For GCS and AWS S3, we suggest setting up credentials so that they represent the identity of the pipeline step, so that not only artifact upload/download but also calls to other Cloud services use the same credentials. For this reason, we don't recommend setting credentials in artifact repository config. The suggestion is to configure the identity transparently if possible using GCP workload identity or AWS IRSA. If credentials are really necessary, they can be configured using pipeline DSL via [kfp.gcp.use_gcp_secret](https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.extensions.html#kfp.gcp.use_gcp_secret) or [kfp.aws.use_aws_secret](https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.extensions.html#kfp.aws.use_aws_secret) etc. These principles should apply to other Cloud Providers that have credentials that can be used with all their services.
>
> For an on-prem object store like MinIO, the credentials do not represent an identity; they are only used to access a specified object store instance. Therefore, it's reasonable to include them in artifact repository config.
>
> ## In summary
> * For GCS, only pipeline root needs to be configurable.
> * For AWS S3, besides pipeline root, we also need region, endpoint etc to be configurable.
> * For MinIO or similar on-prem object stores, besides pipeline root, we also need endpoint, credentials to be configurable.
>
> We cannot implement a spec for every possible object store, so likely we should use the same spec as what [Go CDK](https://gocloud.dev/) supports or depend on cloud provider contributions.
>
> Go CDK supports provider-specific query params to configure some things other than the object key, so we might consider adopting these query params so that pipeline root can be more expressive and we might not need other configurations.
> e.g. for S3, it's possible to configure region via a query param: https://gocloud.dev/howto/blob/#s3
>
> ```
> s3://my-bucket?region=us-west-1
> ```
>
> ## How we configure pipeline root, other configs and credentials?
> Ideally, pipeline root can include all other configs, so that we can uniquely identify an artifact.
> Ideally, credentials should be configurable transparently.
>
> When both ideal requirements are met, we only need to support a namespace-level default pipeline root. All other configurations can be done by specifying different pipeline roots.
>
> However, now MinIO violates the requirement that credentials can be configured transparently. Therefore, we need a mechanism to either
>
> * configure which credentials should be used with which pipeline root (probably, write rules like which pipeline_root prefix/query param should use which credentials)
> * or configure credentials with pipeline root together as artifact repository (but then we should specify artifact repos, not pipeline roots)
> * or ask users to configure credentials separately from pipeline_root
>
> We probably need more thought on the exact config format; this seems like a complex problem.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py`
Content:
```
1 # Copyright 2020-2021 The Kubeflow Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from http.server import BaseHTTPRequestHandler, HTTPServer
16 import json
17 import os
18 import base64
19
20 kfp_version = os.environ["KFP_VERSION"]
21 disable_istio_sidecar = os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
22 mlpipeline_minio_access_key = base64.b64encode(
23 bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
24 mlpipeline_minio_secret_key = base64.b64encode(
25 bytes(os.environ.get("MINIO_SECRET_KEY"), 'utf-8')).decode('utf-8')
26
27
28 class Controller(BaseHTTPRequestHandler):
29 def sync(self, parent, children):
30 pipeline_enabled = parent.get("metadata", {}).get(
31 "labels", {}).get("pipelines.kubeflow.org/enabled")
32
33 if pipeline_enabled != "true":
34 return {"status": {}, "children": []}
35
36 # Compute status based on observed state.
37 desired_status = {
38 "kubeflow-pipelines-ready": \
39 len(children["Secret.v1"]) == 1 and \
40 len(children["ConfigMap.v1"]) == 1 and \
41 len(children["Deployment.apps/v1"]) == 2 and \
42 len(children["Service.v1"]) == 2 and \
43 len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and \
44 len(children["AuthorizationPolicy.security.istio.io/v1beta1"]) == 1 and \
45 "True" or "False"
46 }
47
48 # Generate the desired child object(s).
49 # parent is a namespace
50 namespace = parent.get("metadata", {}).get("name")
51 desired_resources = [
52 {
53 "apiVersion": "v1",
54 "kind": "ConfigMap",
55 "metadata": {
56 "name": "metadata-grpc-configmap",
57 "namespace": namespace,
58 },
59 "data": {
60 "METADATA_GRPC_SERVICE_HOST":
61 "metadata-grpc-service.kubeflow",
62 "METADATA_GRPC_SERVICE_PORT": "8080",
63 },
64 },
65 # Visualization server related manifests below
66 {
67 "apiVersion": "apps/v1",
68 "kind": "Deployment",
69 "metadata": {
70 "labels": {
71 "app": "ml-pipeline-visualizationserver"
72 },
73 "name": "ml-pipeline-visualizationserver",
74 "namespace": namespace,
75 },
76 "spec": {
77 "selector": {
78 "matchLabels": {
79 "app": "ml-pipeline-visualizationserver"
80 },
81 },
82 "template": {
83 "metadata": {
84 "labels": {
85 "app": "ml-pipeline-visualizationserver"
86 },
87 "annotations": disable_istio_sidecar and {
88 "sidecar.istio.io/inject": "false"
89 } or {},
90 },
91 "spec": {
92 "containers": [{
93 "image":
94 "gcr.io/ml-pipeline/visualization-server:" +
95 kfp_version,
96 "imagePullPolicy":
97 "IfNotPresent",
98 "name":
99 "ml-pipeline-visualizationserver",
100 "ports": [{
101 "containerPort": 8888
102 }],
103 "resources": {
104 "requests": {
105 "cpu": "50m",
106 "memory": "200Mi"
107 },
108 "limits": {
109 "cpu": "500m",
110 "memory": "1Gi"
111 },
112 }
113 }],
114 "serviceAccountName":
115 "default-editor",
116 },
117 },
118 },
119 },
120 {
121 "apiVersion": "networking.istio.io/v1alpha3",
122 "kind": "DestinationRule",
123 "metadata": {
124 "name": "ml-pipeline-visualizationserver",
125 "namespace": namespace,
126 },
127 "spec": {
128 "host": "ml-pipeline-visualizationserver",
129 "trafficPolicy": {
130 "tls": {
131 "mode": "ISTIO_MUTUAL"
132 }
133 }
134 }
135 },
136 {
137 "apiVersion": "security.istio.io/v1beta1",
138 "kind": "AuthorizationPolicy",
139 "metadata": {
140 "name": "ml-pipeline-visualizationserver",
141 "namespace": namespace,
142 },
143 "spec": {
144 "selector": {
145 "matchLabels": {
146 "app": "ml-pipeline-visualizationserver"
147 }
148 },
149 "rules": [{
150 "from": [{
151 "source": {
152 "principals": ["cluster.local/ns/kubeflow/sa/ml-pipeline"]
153 }
154 }]
155 }]
156 }
157 },
158 {
159 "apiVersion": "v1",
160 "kind": "Service",
161 "metadata": {
162 "name": "ml-pipeline-visualizationserver",
163 "namespace": namespace,
164 },
165 "spec": {
166 "ports": [{
167 "name": "http",
168 "port": 8888,
169 "protocol": "TCP",
170 "targetPort": 8888,
171 }],
172 "selector": {
173 "app": "ml-pipeline-visualizationserver",
174 },
175 },
176 },
177 # Artifact fetcher related resources below.
178 {
179 "apiVersion": "apps/v1",
180 "kind": "Deployment",
181 "metadata": {
182 "labels": {
183 "app": "ml-pipeline-ui-artifact"
184 },
185 "name": "ml-pipeline-ui-artifact",
186 "namespace": namespace,
187 },
188 "spec": {
189 "selector": {
190 "matchLabels": {
191 "app": "ml-pipeline-ui-artifact"
192 }
193 },
194 "template": {
195 "metadata": {
196 "labels": {
197 "app": "ml-pipeline-ui-artifact"
198 },
199 "annotations": disable_istio_sidecar and {
200 "sidecar.istio.io/inject": "false"
201 } or {},
202 },
203 "spec": {
204 "containers": [{
205 "name":
206 "ml-pipeline-ui-artifact",
207 "image":
208 "gcr.io/ml-pipeline/frontend:" + kfp_version,
209 "imagePullPolicy":
210 "IfNotPresent",
211 "ports": [{
212 "containerPort": 3000
213 }],
214 "resources": {
215 "requests": {
216 "cpu": "10m",
217 "memory": "70Mi"
218 },
219 "limits": {
220 "cpu": "100m",
221 "memory": "500Mi"
222 },
223 }
224 }],
225 "serviceAccountName":
226 "default-editor"
227 }
228 }
229 }
230 },
231 {
232 "apiVersion": "v1",
233 "kind": "Service",
234 "metadata": {
235 "name": "ml-pipeline-ui-artifact",
236 "namespace": namespace,
237 "labels": {
238 "app": "ml-pipeline-ui-artifact"
239 }
240 },
241 "spec": {
242 "ports": [{
243 "name":
244 "http", # name is required to let istio understand request protocol
245 "port": 80,
246 "protocol": "TCP",
247 "targetPort": 3000
248 }],
249 "selector": {
250 "app": "ml-pipeline-ui-artifact"
251 }
252 }
253 },
254 ]
255 print('Received request:', parent)
256 print('Desired resources except secrets:', desired_resources)
257 # Moved after the print argument because this is sensitive data.
258 desired_resources.append({
259 "apiVersion": "v1",
260 "kind": "Secret",
261 "metadata": {
262 "name": "mlpipeline-minio-artifact",
263 "namespace": namespace,
264 },
265 "data": {
266 "accesskey": mlpipeline_minio_access_key,
267 "secretkey": mlpipeline_minio_secret_key,
268 },
269 })
270
271 return {"status": desired_status, "children": desired_resources}
272
273 def do_POST(self):
274 # Serve the sync() function as a JSON webhook.
275 observed = json.loads(
276 self.rfile.read(int(self.headers.get("content-length"))))
277 desired = self.sync(observed["parent"], observed["children"])
278
279 self.send_response(200)
280 self.send_header("Content-type", "application/json")
281 self.end_headers()
282 self.wfile.write(bytes(json.dumps(desired), 'utf-8'))
283
284
285 HTTPServer(("", 8080), Controller).serve_forever()
286
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py b/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py
--- a/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py
+++ b/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py
@@ -18,6 +18,8 @@
import base64
kfp_version = os.environ["KFP_VERSION"]
+# KFP_DEFAULT_PIPELINE_ROOT is optional
+kfp_default_pipeline_root = os.environ.get("KFP_DEFAULT_PIPELINE_ROOT")
disable_istio_sidecar = os.environ.get("DISABLE_ISTIO_SIDECAR") == "true"
mlpipeline_minio_access_key = base64.b64encode(
bytes(os.environ.get("MINIO_ACCESS_KEY"), 'utf-8')).decode('utf-8')
@@ -27,17 +29,35 @@
class Controller(BaseHTTPRequestHandler):
def sync(self, parent, children):
+ # parent is a namespace
+ namespace = parent.get("metadata", {}).get("name")
pipeline_enabled = parent.get("metadata", {}).get(
"labels", {}).get("pipelines.kubeflow.org/enabled")
if pipeline_enabled != "true":
return {"status": {}, "children": []}
+ desired_configmap_count = 1
+ desired_resources = []
+ if kfp_default_pipeline_root:
+ desired_configmap_count = 2
+ desired_resources += [{
+ "apiVersion": "v1",
+ "kind": "ConfigMap",
+ "metadata": {
+ "name": "kfp-launcher",
+ "namespace": namespace,
+ },
+ "data": {
+ "defaultPipelineRoot": kfp_default_pipeline_root,
+ },
+ }]
+
# Compute status based on observed state.
desired_status = {
"kubeflow-pipelines-ready": \
len(children["Secret.v1"]) == 1 and \
- len(children["ConfigMap.v1"]) == 1 and \
+ len(children["ConfigMap.v1"]) == desired_configmap_count and \
len(children["Deployment.apps/v1"]) == 2 and \
len(children["Service.v1"]) == 2 and \
len(children["DestinationRule.networking.istio.io/v1alpha3"]) == 1 and \
@@ -46,9 +66,7 @@
}
# Generate the desired child object(s).
- # parent is a namespace
- namespace = parent.get("metadata", {}).get("name")
- desired_resources = [
+ desired_resources += [
{
"apiVersion": "v1",
"kind": "ConfigMap",
| {"golden_diff": "diff --git a/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py b/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py\n--- a/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py\n+++ b/manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py\n@@ -18,6 +18,8 @@\n import base64\n \n kfp_version = os.environ[\"KFP_VERSION\"]\n+# KFP_DEFAULT_PIPELINE_ROOT is optional\n+kfp_default_pipeline_root = os.environ.get(\"KFP_DEFAULT_PIPELINE_ROOT\")\n disable_istio_sidecar = os.environ.get(\"DISABLE_ISTIO_SIDECAR\") == \"true\"\n mlpipeline_minio_access_key = base64.b64encode(\n bytes(os.environ.get(\"MINIO_ACCESS_KEY\"), 'utf-8')).decode('utf-8')\n@@ -27,17 +29,35 @@\n \n class Controller(BaseHTTPRequestHandler):\n def sync(self, parent, children):\n+ # parent is a namespace\n+ namespace = parent.get(\"metadata\", {}).get(\"name\")\n pipeline_enabled = parent.get(\"metadata\", {}).get(\n \"labels\", {}).get(\"pipelines.kubeflow.org/enabled\")\n \n if pipeline_enabled != \"true\":\n return {\"status\": {}, \"children\": []}\n \n+ desired_configmap_count = 1\n+ desired_resources = []\n+ if kfp_default_pipeline_root:\n+ desired_configmap_count = 2\n+ desired_resources += [{\n+ \"apiVersion\": \"v1\",\n+ \"kind\": \"ConfigMap\",\n+ \"metadata\": {\n+ \"name\": \"kfp-launcher\",\n+ \"namespace\": namespace,\n+ },\n+ \"data\": {\n+ \"defaultPipelineRoot\": kfp_default_pipeline_root,\n+ },\n+ }]\n+\n # Compute status based on observed state.\n desired_status = {\n \"kubeflow-pipelines-ready\": \\\n len(children[\"Secret.v1\"]) == 1 and \\\n- len(children[\"ConfigMap.v1\"]) == 1 and \\\n+ len(children[\"ConfigMap.v1\"]) == desired_configmap_count and \\\n len(children[\"Deployment.apps/v1\"]) == 2 and \\\n len(children[\"Service.v1\"]) == 2 and \\\n len(children[\"DestinationRule.networking.istio.io/v1alpha3\"]) == 1 and \\\n@@ -46,9 +66,7 @@\n }\n \n # Generate the desired child object(s).\n- # parent is a namespace\n- namespace = parent.get(\"metadata\", {}).get(\"name\")\n- desired_resources = [\n+ desired_resources += [\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"ConfigMap\",\n", "issue": "[v2compat] configurable default pipeline root\nQuoted from https://github.com/kubeflow/pipelines/issues/4649#issuecomment-844035914\r\nBlocks https://github.com/kubeflow/pipelines/issues/5680\r\n\r\n> FYI, KFP v2 compatible mode has been released, see documentation: https://www.kubeflow.org/docs/components/pipelines/sdk/v2/.\r\n> \r\n> It doesn't support artifact repository configuration, this is one of the things we want to support too. So I'm posting early thoughts on this related issue.\r\n> \r\n> Let me first try to summarize requirements for configuring artifact repositories for both KFP v2 compatible and v2.\r\n> \r\n> ## Object store specific credentials requirements\r\n> For GCS, AWS S3, we suggest setting up credentials, so that they represent identity of the pipeline step, so that not only artifact upload/download, calls to other Cloud services should also use the same credentials. For this reason, we don't recommend setting credentials in artifact repository config. The suggestion is to configure the identity transparently if possible using GCP workload identity or AWS IRSA. 
If credentials are really necessary, they can be configured using pipeline DSL via [kfp.gcp.use_gcp_secret](https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.extensions.html#kfp.gcp.use_gcp_secret) or [kfp.aws.use_aws_secret](https://kubeflow-pipelines.readthedocs.io/en/latest/source/kfp.extensions.html#kfp.aws.use_aws_secret) etc. These principles should apply to other Cloud Providers that has credentials that can be used with all its services.\r\n> \r\n> For on-prem object store like MinIO, the credentials do not represent an identity, they are only used to access a specified object store instance. Therefore, it's reasonable to include them in artifact repository config.\r\n> \r\n> ## In summary\r\n> * For GCS, only pipeline root needs to be configurable.\r\n> * For AWS S3, besides pipeline root, we also need region, endpoint etc to be configurable.\r\n> * For MinIO or similar on-prem object stores, besides pipeline root, we also need endpoint, credentials to be configurable.\r\n> \r\n> We cannot implement a spec for every possible object stores, so likely we should use the same spec as what [Go CDK](https://gocloud.dev/) supports or depend on cloud provider contributions.\r\n> \r\n> Go CDK supports provider specific query params to configure some things other than object key, so we might consider adopting these query params, so that pipeline root can be more expressive, so we might not need other configurations.\r\n> e.g. for S3, it's possible to configure region via a query param: https://gocloud.dev/howto/blob/#s3\r\n> \r\n> ```\r\n> s3://my-bucket?region=us-west-1\r\n> ```\r\n> \r\n> ## How we configure pipeline root, other configs and credentials?\r\n> Ideally, pipeline root can include all other configs, so that we can uniquely identify an artifact.\r\n> Ideally, credentials should be configurable transparently.\r\n> \r\n> When both ideal requirements are met, we only need to support namespace level default pipeline root. All other configurations can be done by specifying different pipeline roots.\r\n> \r\n> However, now MinIO violates the requirement that credentials can be configured transparently. 
Therefore, we need a mechanism to either\r\n> \r\n> * configure which credentials should be used with which pipeline root (probably, write rules like which pipeline_root prefix/query param should use which credentials)\r\n> * or configure credentials with pipeline root together as artifact repository (but then we should specify artifact repos, not pipeline roots)\r\n> * or ask users to configure credentials separately from pipeline_root\r\n> \r\n> We probably need more thoughts on the exact config format, this seems like a complex problem.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2020-2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport json\nimport os\nimport base64\n\nkfp_version = os.environ[\"KFP_VERSION\"]\ndisable_istio_sidecar = os.environ.get(\"DISABLE_ISTIO_SIDECAR\") == \"true\"\nmlpipeline_minio_access_key = base64.b64encode(\n bytes(os.environ.get(\"MINIO_ACCESS_KEY\"), 'utf-8')).decode('utf-8')\nmlpipeline_minio_secret_key = base64.b64encode(\n bytes(os.environ.get(\"MINIO_SECRET_KEY\"), 'utf-8')).decode('utf-8')\n\n\nclass Controller(BaseHTTPRequestHandler):\n def sync(self, parent, children):\n pipeline_enabled = parent.get(\"metadata\", {}).get(\n \"labels\", {}).get(\"pipelines.kubeflow.org/enabled\")\n\n if pipeline_enabled != \"true\":\n return {\"status\": {}, \"children\": []}\n\n # Compute status based on observed state.\n desired_status = {\n \"kubeflow-pipelines-ready\": \\\n len(children[\"Secret.v1\"]) == 1 and \\\n len(children[\"ConfigMap.v1\"]) == 1 and \\\n len(children[\"Deployment.apps/v1\"]) == 2 and \\\n len(children[\"Service.v1\"]) == 2 and \\\n len(children[\"DestinationRule.networking.istio.io/v1alpha3\"]) == 1 and \\\n len(children[\"AuthorizationPolicy.security.istio.io/v1beta1\"]) == 1 and \\\n \"True\" or \"False\"\n }\n\n # Generate the desired child object(s).\n # parent is a namespace\n namespace = parent.get(\"metadata\", {}).get(\"name\")\n desired_resources = [\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"ConfigMap\",\n \"metadata\": {\n \"name\": \"metadata-grpc-configmap\",\n \"namespace\": namespace,\n },\n \"data\": {\n \"METADATA_GRPC_SERVICE_HOST\":\n \"metadata-grpc-service.kubeflow\",\n \"METADATA_GRPC_SERVICE_PORT\": \"8080\",\n },\n },\n # Visualization server related manifests below\n {\n \"apiVersion\": \"apps/v1\",\n \"kind\": \"Deployment\",\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n },\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"selector\": {\n \"matchLabels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n },\n },\n \"template\": {\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n },\n \"annotations\": disable_istio_sidecar and {\n \"sidecar.istio.io/inject\": \"false\"\n } or {},\n },\n \"spec\": {\n \"containers\": [{\n \"image\":\n \"gcr.io/ml-pipeline/visualization-server:\" +\n 
kfp_version,\n \"imagePullPolicy\":\n \"IfNotPresent\",\n \"name\":\n \"ml-pipeline-visualizationserver\",\n \"ports\": [{\n \"containerPort\": 8888\n }],\n \"resources\": {\n \"requests\": {\n \"cpu\": \"50m\",\n \"memory\": \"200Mi\"\n },\n \"limits\": {\n \"cpu\": \"500m\",\n \"memory\": \"1Gi\"\n },\n }\n }],\n \"serviceAccountName\":\n \"default-editor\",\n },\n },\n },\n },\n {\n \"apiVersion\": \"networking.istio.io/v1alpha3\",\n \"kind\": \"DestinationRule\",\n \"metadata\": {\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"host\": \"ml-pipeline-visualizationserver\",\n \"trafficPolicy\": {\n \"tls\": {\n \"mode\": \"ISTIO_MUTUAL\"\n }\n }\n }\n },\n {\n \"apiVersion\": \"security.istio.io/v1beta1\",\n \"kind\": \"AuthorizationPolicy\",\n \"metadata\": {\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"selector\": {\n \"matchLabels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n }\n },\n \"rules\": [{\n \"from\": [{\n \"source\": {\n \"principals\": [\"cluster.local/ns/kubeflow/sa/ml-pipeline\"]\n }\n }]\n }]\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Service\",\n \"metadata\": {\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"ports\": [{\n \"name\": \"http\",\n \"port\": 8888,\n \"protocol\": \"TCP\",\n \"targetPort\": 8888,\n }],\n \"selector\": {\n \"app\": \"ml-pipeline-visualizationserver\",\n },\n },\n },\n # Artifact fetcher related resources below.\n {\n \"apiVersion\": \"apps/v1\",\n \"kind\": \"Deployment\",\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n },\n \"name\": \"ml-pipeline-ui-artifact\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"selector\": {\n \"matchLabels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n }\n },\n \"template\": {\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n },\n \"annotations\": disable_istio_sidecar and {\n \"sidecar.istio.io/inject\": \"false\"\n } or {},\n },\n \"spec\": {\n \"containers\": [{\n \"name\":\n \"ml-pipeline-ui-artifact\",\n \"image\":\n \"gcr.io/ml-pipeline/frontend:\" + kfp_version,\n \"imagePullPolicy\":\n \"IfNotPresent\",\n \"ports\": [{\n \"containerPort\": 3000\n }],\n \"resources\": {\n \"requests\": {\n \"cpu\": \"10m\",\n \"memory\": \"70Mi\"\n },\n \"limits\": {\n \"cpu\": \"100m\",\n \"memory\": \"500Mi\"\n },\n }\n }],\n \"serviceAccountName\":\n \"default-editor\"\n }\n }\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Service\",\n \"metadata\": {\n \"name\": \"ml-pipeline-ui-artifact\",\n \"namespace\": namespace,\n \"labels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n }\n },\n \"spec\": {\n \"ports\": [{\n \"name\":\n \"http\", # name is required to let istio understand request protocol\n \"port\": 80,\n \"protocol\": \"TCP\",\n \"targetPort\": 3000\n }],\n \"selector\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n }\n }\n },\n ]\n print('Received request:', parent)\n print('Desired resources except secrets:', desired_resources)\n # Moved after the print argument because this is sensitive data.\n desired_resources.append({\n \"apiVersion\": \"v1\",\n \"kind\": \"Secret\",\n \"metadata\": {\n \"name\": \"mlpipeline-minio-artifact\",\n \"namespace\": namespace,\n },\n \"data\": {\n \"accesskey\": mlpipeline_minio_access_key,\n \"secretkey\": mlpipeline_minio_secret_key,\n },\n })\n\n return {\"status\": desired_status, \"children\": desired_resources}\n\n def do_POST(self):\n # 
Serve the sync() function as a JSON webhook.\n observed = json.loads(\n self.rfile.read(int(self.headers.get(\"content-length\"))))\n desired = self.sync(observed[\"parent\"], observed[\"children\"])\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n self.wfile.write(bytes(json.dumps(desired), 'utf-8'))\n\n\nHTTPServer((\"\", 8080), Controller).serve_forever()\n", "path": "manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py"}], "after_files": [{"content": "# Copyright 2020-2021 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport json\nimport os\nimport base64\n\nkfp_version = os.environ[\"KFP_VERSION\"]\n# KFP_DEFAULT_PIPELINE_ROOT is optional\nkfp_default_pipeline_root = os.environ.get(\"KFP_DEFAULT_PIPELINE_ROOT\")\ndisable_istio_sidecar = os.environ.get(\"DISABLE_ISTIO_SIDECAR\") == \"true\"\nmlpipeline_minio_access_key = base64.b64encode(\n bytes(os.environ.get(\"MINIO_ACCESS_KEY\"), 'utf-8')).decode('utf-8')\nmlpipeline_minio_secret_key = base64.b64encode(\n bytes(os.environ.get(\"MINIO_SECRET_KEY\"), 'utf-8')).decode('utf-8')\n\n\nclass Controller(BaseHTTPRequestHandler):\n def sync(self, parent, children):\n # parent is a namespace\n namespace = parent.get(\"metadata\", {}).get(\"name\")\n pipeline_enabled = parent.get(\"metadata\", {}).get(\n \"labels\", {}).get(\"pipelines.kubeflow.org/enabled\")\n\n if pipeline_enabled != \"true\":\n return {\"status\": {}, \"children\": []}\n\n desired_configmap_count = 1\n desired_resources = []\n if kfp_default_pipeline_root:\n desired_configmap_count = 2\n desired_resources += [{\n \"apiVersion\": \"v1\",\n \"kind\": \"ConfigMap\",\n \"metadata\": {\n \"name\": \"kfp-launcher\",\n \"namespace\": namespace,\n },\n \"data\": {\n \"defaultPipelineRoot\": kfp_default_pipeline_root,\n },\n }]\n\n # Compute status based on observed state.\n desired_status = {\n \"kubeflow-pipelines-ready\": \\\n len(children[\"Secret.v1\"]) == 1 and \\\n len(children[\"ConfigMap.v1\"]) == desired_configmap_count and \\\n len(children[\"Deployment.apps/v1\"]) == 2 and \\\n len(children[\"Service.v1\"]) == 2 and \\\n len(children[\"DestinationRule.networking.istio.io/v1alpha3\"]) == 1 and \\\n len(children[\"AuthorizationPolicy.security.istio.io/v1beta1\"]) == 1 and \\\n \"True\" or \"False\"\n }\n\n # Generate the desired child object(s).\n desired_resources += [\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"ConfigMap\",\n \"metadata\": {\n \"name\": \"metadata-grpc-configmap\",\n \"namespace\": namespace,\n },\n \"data\": {\n \"METADATA_GRPC_SERVICE_HOST\":\n \"metadata-grpc-service.kubeflow\",\n \"METADATA_GRPC_SERVICE_PORT\": \"8080\",\n },\n },\n # Visualization server related manifests below\n {\n \"apiVersion\": \"apps/v1\",\n \"kind\": \"Deployment\",\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n },\n \"name\": \"ml-pipeline-visualizationserver\",\n 
\"namespace\": namespace,\n },\n \"spec\": {\n \"selector\": {\n \"matchLabels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n },\n },\n \"template\": {\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n },\n \"annotations\": disable_istio_sidecar and {\n \"sidecar.istio.io/inject\": \"false\"\n } or {},\n },\n \"spec\": {\n \"containers\": [{\n \"image\":\n \"gcr.io/ml-pipeline/visualization-server:\" +\n kfp_version,\n \"imagePullPolicy\":\n \"IfNotPresent\",\n \"name\":\n \"ml-pipeline-visualizationserver\",\n \"ports\": [{\n \"containerPort\": 8888\n }],\n \"resources\": {\n \"requests\": {\n \"cpu\": \"50m\",\n \"memory\": \"200Mi\"\n },\n \"limits\": {\n \"cpu\": \"500m\",\n \"memory\": \"1Gi\"\n },\n }\n }],\n \"serviceAccountName\":\n \"default-editor\",\n },\n },\n },\n },\n {\n \"apiVersion\": \"networking.istio.io/v1alpha3\",\n \"kind\": \"DestinationRule\",\n \"metadata\": {\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"host\": \"ml-pipeline-visualizationserver\",\n \"trafficPolicy\": {\n \"tls\": {\n \"mode\": \"ISTIO_MUTUAL\"\n }\n }\n }\n },\n {\n \"apiVersion\": \"security.istio.io/v1beta1\",\n \"kind\": \"AuthorizationPolicy\",\n \"metadata\": {\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"selector\": {\n \"matchLabels\": {\n \"app\": \"ml-pipeline-visualizationserver\"\n }\n },\n \"rules\": [{\n \"from\": [{\n \"source\": {\n \"principals\": [\"cluster.local/ns/kubeflow/sa/ml-pipeline\"]\n }\n }]\n }]\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Service\",\n \"metadata\": {\n \"name\": \"ml-pipeline-visualizationserver\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"ports\": [{\n \"name\": \"http\",\n \"port\": 8888,\n \"protocol\": \"TCP\",\n \"targetPort\": 8888,\n }],\n \"selector\": {\n \"app\": \"ml-pipeline-visualizationserver\",\n },\n },\n },\n # Artifact fetcher related resources below.\n {\n \"apiVersion\": \"apps/v1\",\n \"kind\": \"Deployment\",\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n },\n \"name\": \"ml-pipeline-ui-artifact\",\n \"namespace\": namespace,\n },\n \"spec\": {\n \"selector\": {\n \"matchLabels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n }\n },\n \"template\": {\n \"metadata\": {\n \"labels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n },\n \"annotations\": disable_istio_sidecar and {\n \"sidecar.istio.io/inject\": \"false\"\n } or {},\n },\n \"spec\": {\n \"containers\": [{\n \"name\":\n \"ml-pipeline-ui-artifact\",\n \"image\":\n \"gcr.io/ml-pipeline/frontend:\" + kfp_version,\n \"imagePullPolicy\":\n \"IfNotPresent\",\n \"ports\": [{\n \"containerPort\": 3000\n }],\n \"resources\": {\n \"requests\": {\n \"cpu\": \"10m\",\n \"memory\": \"70Mi\"\n },\n \"limits\": {\n \"cpu\": \"100m\",\n \"memory\": \"500Mi\"\n },\n }\n }],\n \"serviceAccountName\":\n \"default-editor\"\n }\n }\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Service\",\n \"metadata\": {\n \"name\": \"ml-pipeline-ui-artifact\",\n \"namespace\": namespace,\n \"labels\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n }\n },\n \"spec\": {\n \"ports\": [{\n \"name\":\n \"http\", # name is required to let istio understand request protocol\n \"port\": 80,\n \"protocol\": \"TCP\",\n \"targetPort\": 3000\n }],\n \"selector\": {\n \"app\": \"ml-pipeline-ui-artifact\"\n }\n }\n },\n ]\n print('Received request:', parent)\n print('Desired resources except secrets:', desired_resources)\n # Moved 
after the print argument because this is sensitive data.\n desired_resources.append({\n \"apiVersion\": \"v1\",\n \"kind\": \"Secret\",\n \"metadata\": {\n \"name\": \"mlpipeline-minio-artifact\",\n \"namespace\": namespace,\n },\n \"data\": {\n \"accesskey\": mlpipeline_minio_access_key,\n \"secretkey\": mlpipeline_minio_secret_key,\n },\n })\n\n return {\"status\": desired_status, \"children\": desired_resources}\n\n def do_POST(self):\n # Serve the sync() function as a JSON webhook.\n observed = json.loads(\n self.rfile.read(int(self.headers.get(\"content-length\"))))\n desired = self.sync(observed[\"parent\"], observed[\"children\"])\n\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n self.wfile.write(bytes(json.dumps(desired), 'utf-8'))\n\n\nHTTPServer((\"\", 8080), Controller).serve_forever()\n", "path": "manifests/kustomize/base/installs/multi-user/pipelines-profile-controller/sync.py"}]} | 3,794 | 617 |
gh_patches_debug_27762 | rasdani/github-patches | git_diff | pypi__warehouse-6297 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Database is out of sync with models
Autogenerating a migration from 0abc1b27707bdf9a1ca94ba8392f0086b1eeb492 produces some differences that Alembic is attempting to rectify:
Running:
```
$ docker-compose run web python -m warehouse db revision --autogenerate --message "Sync"
```
Produces:
```python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sync
Revision ID: 71ccb4f929b0
Revises: 5ea52744d154
Create Date: 2019-07-26 17:00:45.297526
"""
from alembic import op
import sqlalchemy as sa
revision = '71ccb4f929b0'
down_revision = '5ea52744d154'
# Note: It is VERY important to ensure that a migration does not lock for a
# long period of time and to ensure that each individual migration does
# not break compatibility with the *previous* version of the code base.
# This is because the migrations will be ran automatically as part of the
# deployment process, but while the previous version of the code is still
# up and running. Thus backwards incompatible changes must be broken up
# over multiple migrations inside of multiple pull requests in order to
# phase them in over multiple deploys.
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('_user_macaroons_description_uc', 'macaroons', ['description', 'user_id'])
op.drop_index('user_security_keys_label_key', table_name='user_security_keys')
op.create_unique_constraint('user_security_keys_label_key', 'user_security_keys', ['label'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('user_security_keys_label_key', 'user_security_keys', type_='unique')
op.create_index('user_security_keys_label_key', 'user_security_keys', ['user_id'], unique=False)
op.drop_constraint('_user_macaroons_description_uc', 'macaroons', type_='unique')
# ### end Alembic commands ###
```
Ideally both `upgrade` and `downgrade` would be no-ops here.
--- END ISSUE ---
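One way to catch this kind of drift automatically is to compare the live schema against the models with Alembic's autogenerate API. This is only a sketch, assuming a SQLAlchemy `connection` and the models' `metadata` object are in scope; it is not part of warehouse's test suite:

```python
from alembic.autogenerate import compare_metadata
from alembic.migration import MigrationContext

ctx = MigrationContext.configure(connection)   # `connection` assumed available
diff = compare_metadata(ctx, metadata)         # `metadata` = the models' MetaData
# An empty diff means models and database are in sync, i.e. autogenerate
# would produce the desired no-op migration.
assert not diff, f"models and database are out of sync: {diff}"
```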
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12 """
13 WebAuthn and Macaroon constraints
14
15 Revision ID: 48def930fcfd
16 Revises: 5ea52744d154
17 Create Date: 2019-07-26 17:55:41.802528
18 """
19
20 from alembic import op
21
22 revision = "48def930fcfd"
23 down_revision = "5ea52744d154"
24
25
26 def upgrade():
27 op.create_unique_constraint(
28 "_user_macaroons_description_uc", "macaroons", ["description", "user_id"]
29 )
30 op.drop_index("user_security_keys_label_key", table_name="user_security_keys")
31 op.create_unique_constraint(
32 "user_security_keys_label_key", "user_security_keys", ["label"]
33 )
34
35
36 def downgrade():
37 op.drop_constraint(
38 "user_security_keys_label_key", "user_security_keys", type_="unique"
39 )
40 op.create_index(
41 "user_security_keys_label_key", "user_security_keys", ["user_id"], unique=False
42 )
43 op.drop_constraint("_user_macaroons_description_uc", "macaroons", type_="unique")
44
```
Path: `warehouse/accounts/models.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import enum
14
15 from citext import CIText
16 from sqlalchemy import (
17 Binary,
18 Boolean,
19 CheckConstraint,
20 Column,
21 DateTime,
22 Enum,
23 ForeignKey,
24 Index,
25 Integer,
26 String,
27 UniqueConstraint,
28 orm,
29 select,
30 sql,
31 )
32 from sqlalchemy.dialects.postgresql import UUID
33 from sqlalchemy.ext.hybrid import hybrid_property
34 from sqlalchemy.orm.exc import NoResultFound
35
36 from warehouse import db
37 from warehouse.sitemap.models import SitemapMixin
38 from warehouse.utils.attrs import make_repr
39
40
41 class UserFactory:
42 def __init__(self, request):
43 self.request = request
44
45 def __getitem__(self, username):
46 try:
47 return self.request.db.query(User).filter(User.username == username).one()
48 except NoResultFound:
49 raise KeyError from None
50
51
52 class DisableReason(enum.Enum):
53
54 CompromisedPassword = "password compromised"
55
56
57 class User(SitemapMixin, db.Model):
58
59 __tablename__ = "users"
60 __table_args__ = (
61 CheckConstraint("length(username) <= 50", name="users_valid_username_length"),
62 CheckConstraint(
63 "username ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'",
64 name="users_valid_username",
65 ),
66 )
67
68 __repr__ = make_repr("username")
69
70 username = Column(CIText, nullable=False, unique=True)
71 name = Column(String(length=100), nullable=False)
72 password = Column(String(length=128), nullable=False)
73 password_date = Column(DateTime, nullable=True, server_default=sql.func.now())
74 is_active = Column(Boolean, nullable=False, server_default=sql.false())
75 is_superuser = Column(Boolean, nullable=False, server_default=sql.false())
76 is_moderator = Column(Boolean, nullable=False, server_default=sql.false())
77 date_joined = Column(DateTime, server_default=sql.func.now())
78 last_login = Column(DateTime, nullable=False, server_default=sql.func.now())
79 disabled_for = Column(
80 Enum(DisableReason, values_callable=lambda x: [e.value for e in x]),
81 nullable=True,
82 )
83
84 totp_secret = Column(Binary(length=20), nullable=True)
85
86 webauthn = orm.relationship(
87 "WebAuthn", backref="user", cascade="all, delete-orphan", lazy=False
88 )
89
90 emails = orm.relationship(
91 "Email", backref="user", cascade="all, delete-orphan", lazy=False
92 )
93
94 macaroons = orm.relationship(
95 "Macaroon", backref="user", cascade="all, delete-orphan", lazy=False
96 )
97
98 @property
99 def primary_email(self):
100 primaries = [x for x in self.emails if x.primary]
101 if primaries:
102 return primaries[0]
103
104 @hybrid_property
105 def email(self):
106 primary_email = self.primary_email
107 return primary_email.email if primary_email else None
108
109 @email.expression
110 def email(self):
111 return (
112 select([Email.email])
113 .where((Email.user_id == self.id) & (Email.primary.is_(True)))
114 .as_scalar()
115 )
116
117 @property
118 def has_two_factor(self):
119 return self.totp_secret is not None or len(self.webauthn) > 0
120
121 @property
122 def has_primary_verified_email(self):
123 return self.primary_email is not None and self.primary_email.verified
124
125
126 class WebAuthn(db.Model):
127 __tablename__ = "user_security_keys"
128 __table_args__ = (
129 UniqueConstraint("label", name="user_security_keys_label_key"),
130 Index("user_security_keys_label_key", "user_id"),
131 )
132
133 user_id = Column(
134 UUID(as_uuid=True),
135 ForeignKey("users.id", deferrable=True, initially="DEFERRED"),
136 nullable=False,
137 )
138 label = Column(String, nullable=False)
139 credential_id = Column(String, unique=True, nullable=False)
140 public_key = Column(String, unique=True, nullable=True)
141 sign_count = Column(Integer, default=0)
142
143
144 class UnverifyReasons(enum.Enum):
145
146 SpamComplaint = "spam complaint"
147 HardBounce = "hard bounce"
148 SoftBounce = "soft bounce"
149
150
151 class Email(db.ModelBase):
152
153 __tablename__ = "user_emails"
154 __table_args__ = (
155 UniqueConstraint("email", name="user_emails_email_key"),
156 Index("user_emails_user_id", "user_id"),
157 )
158
159 id = Column(Integer, primary_key=True, nullable=False)
160 user_id = Column(
161 UUID(as_uuid=True),
162 ForeignKey("users.id", deferrable=True, initially="DEFERRED"),
163 nullable=False,
164 )
165 email = Column(String(length=254), nullable=False)
166 primary = Column(Boolean, nullable=False)
167 verified = Column(Boolean, nullable=False)
168
169 # Deliverability information
170 unverify_reason = Column(
171 Enum(UnverifyReasons, values_callable=lambda x: [e.value for e in x]),
172 nullable=True,
173 )
174 transient_bounces = Column(Integer, nullable=False, server_default=sql.text("0"))
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/accounts/models.py b/warehouse/accounts/models.py
--- a/warehouse/accounts/models.py
+++ b/warehouse/accounts/models.py
@@ -126,8 +126,7 @@
class WebAuthn(db.Model):
__tablename__ = "user_security_keys"
__table_args__ = (
- UniqueConstraint("label", name="user_security_keys_label_key"),
- Index("user_security_keys_label_key", "user_id"),
+ UniqueConstraint("label", "user_id", name="_user_security_keys_label_uc"),
)
user_id = Column(
diff --git a/warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py b/warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py
--- a/warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py
+++ b/warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py
@@ -27,17 +27,17 @@
op.create_unique_constraint(
"_user_macaroons_description_uc", "macaroons", ["description", "user_id"]
)
- op.drop_index("user_security_keys_label_key", table_name="user_security_keys")
op.create_unique_constraint(
- "user_security_keys_label_key", "user_security_keys", ["label"]
+ "_user_security_keys_label_uc", "user_security_keys", ["label", "user_id"]
)
+ op.drop_index("user_security_keys_label_key", table_name="user_security_keys")
def downgrade():
- op.drop_constraint(
- "user_security_keys_label_key", "user_security_keys", type_="unique"
- )
op.create_index(
"user_security_keys_label_key", "user_security_keys", ["user_id"], unique=False
)
+ op.drop_constraint(
+ "_user_security_keys_label_uc", "user_security_keys", type_="unique"
+ )
op.drop_constraint("_user_macaroons_description_uc", "macaroons", type_="unique")
| {"golden_diff": "diff --git a/warehouse/accounts/models.py b/warehouse/accounts/models.py\n--- a/warehouse/accounts/models.py\n+++ b/warehouse/accounts/models.py\n@@ -126,8 +126,7 @@\n class WebAuthn(db.Model):\n __tablename__ = \"user_security_keys\"\n __table_args__ = (\n- UniqueConstraint(\"label\", name=\"user_security_keys_label_key\"),\n- Index(\"user_security_keys_label_key\", \"user_id\"),\n+ UniqueConstraint(\"label\", \"user_id\", name=\"_user_security_keys_label_uc\"),\n )\n \n user_id = Column(\ndiff --git a/warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py b/warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py\n--- a/warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py\n+++ b/warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py\n@@ -27,17 +27,17 @@\n op.create_unique_constraint(\n \"_user_macaroons_description_uc\", \"macaroons\", [\"description\", \"user_id\"]\n )\n- op.drop_index(\"user_security_keys_label_key\", table_name=\"user_security_keys\")\n op.create_unique_constraint(\n- \"user_security_keys_label_key\", \"user_security_keys\", [\"label\"]\n+ \"_user_security_keys_label_uc\", \"user_security_keys\", [\"label\", \"user_id\"]\n )\n+ op.drop_index(\"user_security_keys_label_key\", table_name=\"user_security_keys\")\n \n \n def downgrade():\n- op.drop_constraint(\n- \"user_security_keys_label_key\", \"user_security_keys\", type_=\"unique\"\n- )\n op.create_index(\n \"user_security_keys_label_key\", \"user_security_keys\", [\"user_id\"], unique=False\n )\n+ op.drop_constraint(\n+ \"_user_security_keys_label_uc\", \"user_security_keys\", type_=\"unique\"\n+ )\n op.drop_constraint(\"_user_macaroons_description_uc\", \"macaroons\", type_=\"unique\")\n", "issue": "Database is out of sync with models\nAutogenerating a migration from 0abc1b27707bdf9a1ca94ba8392f0086b1eeb492 produces some differences that Alembic is attempting to rectify:\r\n\r\nRunning:\r\n```\r\n$ docker-compose run web python -m warehouse db revision --autogenerate --message \"Sync\"\r\n```\r\n\r\nProduces:\r\n```python\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nSync\r\n\r\nRevision ID: 71ccb4f929b0\r\nRevises: 5ea52744d154\r\nCreate Date: 2019-07-26 17:00:45.297526\r\n\"\"\"\r\n\r\nfrom alembic import op\r\nimport sqlalchemy as sa\r\n\r\n\r\nrevision = '71ccb4f929b0'\r\ndown_revision = '5ea52744d154'\r\n\r\n# Note: It is VERY important to ensure that a migration does not lock for a\r\n# long period of time and to ensure that each individual migration does\r\n# not break compatibility with the *previous* version of the code base.\r\n# This is because the migrations will be ran automatically as part of the\r\n# deployment process, but while the previous version of the code is still\r\n# up and running. 
Thus backwards incompatible changes must be broken up\r\n# over multiple migrations inside of multiple pull requests in order to\r\n# phase them in over multiple deploys.\r\n\r\ndef upgrade():\r\n # ### commands auto generated by Alembic - please adjust! ###\r\n op.create_unique_constraint('_user_macaroons_description_uc', 'macaroons', ['description', 'user_id'])\r\n op.drop_index('user_security_keys_label_key', table_name='user_security_keys')\r\n op.create_unique_constraint('user_security_keys_label_key', 'user_security_keys', ['label'])\r\n # ### end Alembic commands ###\r\n\r\n\r\ndef downgrade():\r\n # ### commands auto generated by Alembic - please adjust! ###\r\n op.drop_constraint('user_security_keys_label_key', 'user_security_keys', type_='unique')\r\n op.create_index('user_security_keys_label_key', 'user_security_keys', ['user_id'], unique=False)\r\n op.drop_constraint('_user_macaroons_description_uc', 'macaroons', type_='unique')\r\n # ### end Alembic commands ### \r\n```\r\n\r\nIdeally both `upgrade` and `downgrade` would be no-ops here.\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nWebAuthn and Macaroon constraints\n\nRevision ID: 48def930fcfd\nRevises: 5ea52744d154\nCreate Date: 2019-07-26 17:55:41.802528\n\"\"\"\n\nfrom alembic import op\n\nrevision = \"48def930fcfd\"\ndown_revision = \"5ea52744d154\"\n\n\ndef upgrade():\n op.create_unique_constraint(\n \"_user_macaroons_description_uc\", \"macaroons\", [\"description\", \"user_id\"]\n )\n op.drop_index(\"user_security_keys_label_key\", table_name=\"user_security_keys\")\n op.create_unique_constraint(\n \"user_security_keys_label_key\", \"user_security_keys\", [\"label\"]\n )\n\n\ndef downgrade():\n op.drop_constraint(\n \"user_security_keys_label_key\", \"user_security_keys\", type_=\"unique\"\n )\n op.create_index(\n \"user_security_keys_label_key\", \"user_security_keys\", [\"user_id\"], unique=False\n )\n op.drop_constraint(\"_user_macaroons_description_uc\", \"macaroons\", type_=\"unique\")\n", "path": "warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport enum\n\nfrom citext import CIText\nfrom sqlalchemy import (\n Binary,\n Boolean,\n CheckConstraint,\n Column,\n DateTime,\n Enum,\n ForeignKey,\n Index,\n Integer,\n String,\n UniqueConstraint,\n orm,\n select,\n sql,\n)\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom 
sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse import db\nfrom warehouse.sitemap.models import SitemapMixin\nfrom warehouse.utils.attrs import make_repr\n\n\nclass UserFactory:\n def __init__(self, request):\n self.request = request\n\n def __getitem__(self, username):\n try:\n return self.request.db.query(User).filter(User.username == username).one()\n except NoResultFound:\n raise KeyError from None\n\n\nclass DisableReason(enum.Enum):\n\n CompromisedPassword = \"password compromised\"\n\n\nclass User(SitemapMixin, db.Model):\n\n __tablename__ = \"users\"\n __table_args__ = (\n CheckConstraint(\"length(username) <= 50\", name=\"users_valid_username_length\"),\n CheckConstraint(\n \"username ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'\",\n name=\"users_valid_username\",\n ),\n )\n\n __repr__ = make_repr(\"username\")\n\n username = Column(CIText, nullable=False, unique=True)\n name = Column(String(length=100), nullable=False)\n password = Column(String(length=128), nullable=False)\n password_date = Column(DateTime, nullable=True, server_default=sql.func.now())\n is_active = Column(Boolean, nullable=False, server_default=sql.false())\n is_superuser = Column(Boolean, nullable=False, server_default=sql.false())\n is_moderator = Column(Boolean, nullable=False, server_default=sql.false())\n date_joined = Column(DateTime, server_default=sql.func.now())\n last_login = Column(DateTime, nullable=False, server_default=sql.func.now())\n disabled_for = Column(\n Enum(DisableReason, values_callable=lambda x: [e.value for e in x]),\n nullable=True,\n )\n\n totp_secret = Column(Binary(length=20), nullable=True)\n\n webauthn = orm.relationship(\n \"WebAuthn\", backref=\"user\", cascade=\"all, delete-orphan\", lazy=False\n )\n\n emails = orm.relationship(\n \"Email\", backref=\"user\", cascade=\"all, delete-orphan\", lazy=False\n )\n\n macaroons = orm.relationship(\n \"Macaroon\", backref=\"user\", cascade=\"all, delete-orphan\", lazy=False\n )\n\n @property\n def primary_email(self):\n primaries = [x for x in self.emails if x.primary]\n if primaries:\n return primaries[0]\n\n @hybrid_property\n def email(self):\n primary_email = self.primary_email\n return primary_email.email if primary_email else None\n\n @email.expression\n def email(self):\n return (\n select([Email.email])\n .where((Email.user_id == self.id) & (Email.primary.is_(True)))\n .as_scalar()\n )\n\n @property\n def has_two_factor(self):\n return self.totp_secret is not None or len(self.webauthn) > 0\n\n @property\n def has_primary_verified_email(self):\n return self.primary_email is not None and self.primary_email.verified\n\n\nclass WebAuthn(db.Model):\n __tablename__ = \"user_security_keys\"\n __table_args__ = (\n UniqueConstraint(\"label\", name=\"user_security_keys_label_key\"),\n Index(\"user_security_keys_label_key\", \"user_id\"),\n )\n\n user_id = Column(\n UUID(as_uuid=True),\n ForeignKey(\"users.id\", deferrable=True, initially=\"DEFERRED\"),\n nullable=False,\n )\n label = Column(String, nullable=False)\n credential_id = Column(String, unique=True, nullable=False)\n public_key = Column(String, unique=True, nullable=True)\n sign_count = Column(Integer, default=0)\n\n\nclass UnverifyReasons(enum.Enum):\n\n SpamComplaint = \"spam complaint\"\n HardBounce = \"hard bounce\"\n SoftBounce = \"soft bounce\"\n\n\nclass Email(db.ModelBase):\n\n __tablename__ = \"user_emails\"\n __table_args__ = (\n UniqueConstraint(\"email\", name=\"user_emails_email_key\"),\n Index(\"user_emails_user_id\", \"user_id\"),\n )\n\n id = 
Column(Integer, primary_key=True, nullable=False)\n user_id = Column(\n UUID(as_uuid=True),\n ForeignKey(\"users.id\", deferrable=True, initially=\"DEFERRED\"),\n nullable=False,\n )\n email = Column(String(length=254), nullable=False)\n primary = Column(Boolean, nullable=False)\n verified = Column(Boolean, nullable=False)\n\n # Deliverability information\n unverify_reason = Column(\n Enum(UnverifyReasons, values_callable=lambda x: [e.value for e in x]),\n nullable=True,\n )\n transient_bounces = Column(Integer, nullable=False, server_default=sql.text(\"0\"))\n", "path": "warehouse/accounts/models.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nWebAuthn and Macaroon constraints\n\nRevision ID: 48def930fcfd\nRevises: 5ea52744d154\nCreate Date: 2019-07-26 17:55:41.802528\n\"\"\"\n\nfrom alembic import op\n\nrevision = \"48def930fcfd\"\ndown_revision = \"5ea52744d154\"\n\n\ndef upgrade():\n op.create_unique_constraint(\n \"_user_macaroons_description_uc\", \"macaroons\", [\"description\", \"user_id\"]\n )\n op.create_unique_constraint(\n \"_user_security_keys_label_uc\", \"user_security_keys\", [\"label\", \"user_id\"]\n )\n op.drop_index(\"user_security_keys_label_key\", table_name=\"user_security_keys\")\n\n\ndef downgrade():\n op.create_index(\n \"user_security_keys_label_key\", \"user_security_keys\", [\"user_id\"], unique=False\n )\n op.drop_constraint(\n \"_user_security_keys_label_uc\", \"user_security_keys\", type_=\"unique\"\n )\n op.drop_constraint(\"_user_macaroons_description_uc\", \"macaroons\", type_=\"unique\")\n", "path": "warehouse/migrations/versions/48def930fcfd_webauthn_and_macaroon_constraints.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport enum\n\nfrom citext import CIText\nfrom sqlalchemy import (\n Binary,\n Boolean,\n CheckConstraint,\n Column,\n DateTime,\n Enum,\n ForeignKey,\n Index,\n Integer,\n String,\n UniqueConstraint,\n orm,\n select,\n sql,\n)\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse import db\nfrom warehouse.sitemap.models import SitemapMixin\nfrom warehouse.utils.attrs import make_repr\n\n\nclass UserFactory:\n def __init__(self, request):\n self.request = request\n\n def __getitem__(self, username):\n try:\n return self.request.db.query(User).filter(User.username == username).one()\n except NoResultFound:\n raise KeyError from None\n\n\nclass 
DisableReason(enum.Enum):\n\n CompromisedPassword = \"password compromised\"\n\n\nclass User(SitemapMixin, db.Model):\n\n __tablename__ = \"users\"\n __table_args__ = (\n CheckConstraint(\"length(username) <= 50\", name=\"users_valid_username_length\"),\n CheckConstraint(\n \"username ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'\",\n name=\"users_valid_username\",\n ),\n )\n\n __repr__ = make_repr(\"username\")\n\n username = Column(CIText, nullable=False, unique=True)\n name = Column(String(length=100), nullable=False)\n password = Column(String(length=128), nullable=False)\n password_date = Column(DateTime, nullable=True, server_default=sql.func.now())\n is_active = Column(Boolean, nullable=False, server_default=sql.false())\n is_superuser = Column(Boolean, nullable=False, server_default=sql.false())\n is_moderator = Column(Boolean, nullable=False, server_default=sql.false())\n date_joined = Column(DateTime, server_default=sql.func.now())\n last_login = Column(DateTime, nullable=False, server_default=sql.func.now())\n disabled_for = Column(\n Enum(DisableReason, values_callable=lambda x: [e.value for e in x]),\n nullable=True,\n )\n\n totp_secret = Column(Binary(length=20), nullable=True)\n\n webauthn = orm.relationship(\n \"WebAuthn\", backref=\"user\", cascade=\"all, delete-orphan\", lazy=False\n )\n\n emails = orm.relationship(\n \"Email\", backref=\"user\", cascade=\"all, delete-orphan\", lazy=False\n )\n\n macaroons = orm.relationship(\n \"Macaroon\", backref=\"user\", cascade=\"all, delete-orphan\", lazy=False\n )\n\n @property\n def primary_email(self):\n primaries = [x for x in self.emails if x.primary]\n if primaries:\n return primaries[0]\n\n @hybrid_property\n def email(self):\n primary_email = self.primary_email\n return primary_email.email if primary_email else None\n\n @email.expression\n def email(self):\n return (\n select([Email.email])\n .where((Email.user_id == self.id) & (Email.primary.is_(True)))\n .as_scalar()\n )\n\n @property\n def has_two_factor(self):\n return self.totp_secret is not None or len(self.webauthn) > 0\n\n @property\n def has_primary_verified_email(self):\n return self.primary_email is not None and self.primary_email.verified\n\n\nclass WebAuthn(db.Model):\n __tablename__ = \"user_security_keys\"\n __table_args__ = (\n UniqueConstraint(\"label\", \"user_id\", name=\"_user_security_keys_label_uc\"),\n )\n\n user_id = Column(\n UUID(as_uuid=True),\n ForeignKey(\"users.id\", deferrable=True, initially=\"DEFERRED\"),\n nullable=False,\n )\n label = Column(String, nullable=False)\n credential_id = Column(String, unique=True, nullable=False)\n public_key = Column(String, unique=True, nullable=True)\n sign_count = Column(Integer, default=0)\n\n\nclass UnverifyReasons(enum.Enum):\n\n SpamComplaint = \"spam complaint\"\n HardBounce = \"hard bounce\"\n SoftBounce = \"soft bounce\"\n\n\nclass Email(db.ModelBase):\n\n __tablename__ = \"user_emails\"\n __table_args__ = (\n UniqueConstraint(\"email\", name=\"user_emails_email_key\"),\n Index(\"user_emails_user_id\", \"user_id\"),\n )\n\n id = Column(Integer, primary_key=True, nullable=False)\n user_id = Column(\n UUID(as_uuid=True),\n ForeignKey(\"users.id\", deferrable=True, initially=\"DEFERRED\"),\n nullable=False,\n )\n email = Column(String(length=254), nullable=False)\n primary = Column(Boolean, nullable=False)\n verified = Column(Boolean, nullable=False)\n\n # Deliverability information\n unverify_reason = Column(\n Enum(UnverifyReasons, values_callable=lambda x: [e.value for e in x]),\n nullable=True,\n 
)\n transient_bounces = Column(Integer, nullable=False, server_default=sql.text(\"0\"))\n", "path": "warehouse/accounts/models.py"}]} | 3,060 | 471 |
gh_patches_debug_60843 | rasdani/github-patches | git_diff | doccano__doccano-1670 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Database table for SpanType has invalid name
How to reproduce the behaviour
---------
- Pull latest changes from master
- ./manage.py migrate
- ./api/migrations/0033_auto_20220127_0654.py will migrate the database table for `SpanType` to `label_types_spanType`
- Delete a project: `Project.objects.first().delete()`
Exception:
<img width="511" alt="image" src="https://user-images.githubusercontent.com/6747788/152384221-a6a549b8-1cca-49c0-86e4-6a20f7d0a266.png">
The issue can be resolved either by renaming the db table `label_types_spanType` to `label_types_spantype` or by explicitly setting `db_table` for the SpanType model like this: `db_table = "label_types_spanType"`
Your Environment
---------
* Operating System: macOS Monterey, doccano is locally executed
* Python Version Used: 3.9
--- END ISSUE ---
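For reference, the second workaround mentioned above (pinning the table name on the model) would look roughly like this on a plain Django model; the `text` field and base class are placeholders, not doccano's real definition:

```python
from django.db import models


class SpanType(models.Model):
    text = models.CharField(max_length=100)  # placeholder field

    class Meta:
        # Explicitly keep the historical, mixed-case table name so it matches
        # what the 0033 migration created, instead of Django's lowercased default.
        db_table = "label_types_spanType"
```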
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/api/migrations/0033_auto_20220127_0654.py`
Content:
```
1 # Generated by Django 3.2.11 on 2022-01-27 06:54
2
3 from django.db import migrations
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('labels', '0003_auto_20220127_0654'),
10 ('api', '0032_auto_20220127_0654'),
11 ]
12
13 operations = [
14 migrations.SeparateDatabaseAndState(
15 state_operations=[
16 migrations.DeleteModel(
17 name='CategoryType',
18 ),
19 migrations.DeleteModel(
20 name='RelationTypes',
21 ),
22 migrations.DeleteModel(
23 name='SpanType',
24 ),
25 ],
26 database_operations=[
27 migrations.AlterModelTable(
28 name='CategoryType',
29 table='label_types_categorytype'
30 ),
31 migrations.AlterModelTable(
32 name='RelationTypes',
33 table='label_types_relationtypes'
34 ),
35 migrations.AlterModelTable(
36 name='SpanType',
37 table='label_types_spanType'
38 )
39 ]
40 )
41 ]
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/api/migrations/0033_auto_20220127_0654.py b/backend/api/migrations/0033_auto_20220127_0654.py
--- a/backend/api/migrations/0033_auto_20220127_0654.py
+++ b/backend/api/migrations/0033_auto_20220127_0654.py
@@ -34,7 +34,7 @@
),
migrations.AlterModelTable(
name='SpanType',
- table='label_types_spanType'
+ table='label_types_spantype'
)
]
)
| {"golden_diff": "diff --git a/backend/api/migrations/0033_auto_20220127_0654.py b/backend/api/migrations/0033_auto_20220127_0654.py\n--- a/backend/api/migrations/0033_auto_20220127_0654.py\n+++ b/backend/api/migrations/0033_auto_20220127_0654.py\n@@ -34,7 +34,7 @@\n ),\n migrations.AlterModelTable(\n name='SpanType',\n- table='label_types_spanType'\n+ table='label_types_spantype'\n )\n ]\n )\n", "issue": "Database table for SpanType has invalid name \nHow to reproduce the behaviour\r\n---------\r\n- Pull latest changes from master\r\n- ./manage.py migrate\r\n- ./api/migrations/0033_auto_20220127_0654.py will migrate the database table for `SpanType` to `label_types_spanType`\r\n- Delete a project `Project.objects.first().delete()``\r\n\r\nException:\r\n\r\n<img width=\"511\" alt=\"image\" src=\"https://user-images.githubusercontent.com/6747788/152384221-a6a549b8-1cca-49c0-86e4-6a20f7d0a266.png\">\r\n \r\nThe issue can be resolved by either renaming db table `label_types_spanType` to `label_types_spantype` or by explicitly setting `tb_table` for SpanType model like this: `db_table = \"label_types_spanType\"`\r\n\r\nYour Environment\r\n---------\r\n* Operating System: macOS Monterey, doccano is locally executed\r\n* Python Version Used: 3.9\r\n\n", "before_files": [{"content": "# Generated by Django 3.2.11 on 2022-01-27 06:54\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('labels', '0003_auto_20220127_0654'),\n ('api', '0032_auto_20220127_0654'),\n ]\n\n operations = [\n migrations.SeparateDatabaseAndState(\n state_operations=[\n migrations.DeleteModel(\n name='CategoryType',\n ),\n migrations.DeleteModel(\n name='RelationTypes',\n ),\n migrations.DeleteModel(\n name='SpanType',\n ),\n ],\n database_operations=[\n migrations.AlterModelTable(\n name='CategoryType',\n table='label_types_categorytype'\n ),\n migrations.AlterModelTable(\n name='RelationTypes',\n table='label_types_relationtypes'\n ),\n migrations.AlterModelTable(\n name='SpanType',\n table='label_types_spanType'\n )\n ]\n )\n ]\n", "path": "backend/api/migrations/0033_auto_20220127_0654.py"}], "after_files": [{"content": "# Generated by Django 3.2.11 on 2022-01-27 06:54\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('labels', '0003_auto_20220127_0654'),\n ('api', '0032_auto_20220127_0654'),\n ]\n\n operations = [\n migrations.SeparateDatabaseAndState(\n state_operations=[\n migrations.DeleteModel(\n name='CategoryType',\n ),\n migrations.DeleteModel(\n name='RelationTypes',\n ),\n migrations.DeleteModel(\n name='SpanType',\n ),\n ],\n database_operations=[\n migrations.AlterModelTable(\n name='CategoryType',\n table='label_types_categorytype'\n ),\n migrations.AlterModelTable(\n name='RelationTypes',\n table='label_types_relationtypes'\n ),\n migrations.AlterModelTable(\n name='SpanType',\n table='label_types_spantype'\n )\n ]\n )\n ]\n", "path": "backend/api/migrations/0033_auto_20220127_0654.py"}]} | 835 | 163 |
gh_patches_debug_29475 | rasdani/github-patches | git_diff | litestar-org__litestar-2259 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
--- END ISSUE ---
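To see why an on-disk existence check is too strict here, a minimal sketch of reading package data through `importlib.resources`; the package name `my_package.assets` is made up, and this is not Litestar's `FileSystemProtocol` API:

```python
from importlib import resources

assets = resources.files("my_package.assets")    # a Traversable, not necessarily a real directory
data = assets.joinpath("logo.png").read_bytes()  # still readable when the package ships as a zip

# pydantic's DirectoryPath would reject this source because there may be no
# directory on the local filesystem to validate, even though reads succeed.
```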
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py`
Content:
```
1 import json
2 from pathlib import Path
3 from typing import Any
4
5 from rich import get_console
6 from sqlalchemy import create_engine
7 from sqlalchemy.orm import Mapped, Session, sessionmaker
8
9 from litestar.contrib.sqlalchemy.base import UUIDBase
10 from litestar.contrib.sqlalchemy.repository import SQLAlchemySyncRepository
11 from litestar.repository.filters import LimitOffset
12
13 here = Path(__file__).parent
14 console = get_console()
15
16
17 class USState(UUIDBase):
18 # you can optionally override the generated table name by manually setting it.
19 __tablename__ = "us_state_lookup" # type: ignore[assignment]
20 abbreviation: Mapped[str]
21 name: Mapped[str]
22
23
24 class USStateRepository(SQLAlchemySyncRepository[USState]):
25 """US State repository."""
26
27 model_type = USState
28
29
30 engine = create_engine(
31 "duckdb:///:memory:",
32 future=True,
33 )
34 session_factory: sessionmaker[Session] = sessionmaker(engine, expire_on_commit=False)
35
36
37 def open_fixture(fixtures_path: Path, fixture_name: str) -> Any:
38 """Loads JSON file with the specified fixture name
39
40 Args:
41 fixtures_path (Path): The path to look for fixtures
42 fixture_name (str): The fixture name to load.
43
44 Raises:
45 FileNotFoundError: Fixtures not found.
46
47 Returns:
48 Any: The parsed JSON data
49 """
50 fixture = Path(fixtures_path / f"{fixture_name}.json")
51 if fixture.exists():
52 with fixture.open(mode="r", encoding="utf-8") as f:
53 f_data = f.read()
54 return json.loads(f_data)
55 raise FileNotFoundError(f"Could not find the {fixture_name} fixture")
56
57
58 def run_script() -> None:
59 """Load data from a fixture."""
60
61 # Initializes the database.
62 with engine.begin() as conn:
63 USState.metadata.create_all(conn)
64
65 with session_factory() as db_session:
66 # 1) load the JSON data into the US States table
67 repo = USStateRepository(session=db_session)
68 fixture = open_fixture(here, USStateRepository.model_type.__tablename__) # type: ignore
69 objs = repo.add_many([USStateRepository.model_type(**raw_obj) for raw_obj in fixture])
70 db_session.commit()
71 console.print(f"Created {len(objs)} new objects.")
72
73 # 2) Select paginated data and total row count.
74 created_objs, total_objs = repo.list_and_count(LimitOffset(limit=10, offset=0))
75 console.print(f"Selected {len(created_objs)} records out of a total of {total_objs}.")
76
77 # 2) Let's remove the batch of records selected.
78 deleted_objs = repo.delete_many([new_obj.id for new_obj in created_objs])
79 console.print(f"Removed {len(deleted_objs)} records out of a total of {total_objs}.")
80
81 # 3) Le'ts count the remaining rows
82 remaining_count = repo.count()
83 console.print(f"Found {remaining_count} remaining records after delete.")
84
85
86 if __name__ == "__main__":
87 run_script()
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py b/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py
--- a/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py
+++ b/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py
@@ -63,7 +63,7 @@
USState.metadata.create_all(conn)
with session_factory() as db_session:
- # 1) load the JSON data into the US States table
+ # 1) Load the JSON data into the US States table.
repo = USStateRepository(session=db_session)
fixture = open_fixture(here, USStateRepository.model_type.__tablename__) # type: ignore
objs = repo.add_many([USStateRepository.model_type(**raw_obj) for raw_obj in fixture])
@@ -74,11 +74,11 @@
created_objs, total_objs = repo.list_and_count(LimitOffset(limit=10, offset=0))
console.print(f"Selected {len(created_objs)} records out of a total of {total_objs}.")
- # 2) Let's remove the batch of records selected.
+ # 3) Let's remove the batch of records selected.
deleted_objs = repo.delete_many([new_obj.id for new_obj in created_objs])
console.print(f"Removed {len(deleted_objs)} records out of a total of {total_objs}.")
- # 3) Le'ts count the remaining rows
+ # 4) Let's count the remaining rows
remaining_count = repo.count()
console.print(f"Found {remaining_count} remaining records after delete.")
| {"golden_diff": "diff --git a/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py b/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py\n--- a/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py\n+++ b/docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py\n@@ -63,7 +63,7 @@\n USState.metadata.create_all(conn)\n \n with session_factory() as db_session:\n- # 1) load the JSON data into the US States table\n+ # 1) Load the JSON data into the US States table.\n repo = USStateRepository(session=db_session)\n fixture = open_fixture(here, USStateRepository.model_type.__tablename__) # type: ignore\n objs = repo.add_many([USStateRepository.model_type(**raw_obj) for raw_obj in fixture])\n@@ -74,11 +74,11 @@\n created_objs, total_objs = repo.list_and_count(LimitOffset(limit=10, offset=0))\n console.print(f\"Selected {len(created_objs)} records out of a total of {total_objs}.\")\n \n- # 2) Let's remove the batch of records selected.\n+ # 3) Let's remove the batch of records selected.\n deleted_objs = repo.delete_many([new_obj.id for new_obj in created_objs])\n console.print(f\"Removed {len(deleted_objs)} records out of a total of {total_objs}.\")\n \n- # 3) Le'ts count the remaining rows\n+ # 4) Let's count the remaining rows\n remaining_count = repo.count()\n console.print(f\"Found {remaining_count} remaining records after delete.\")\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "import json\nfrom pathlib import Path\nfrom typing import Any\n\nfrom rich import get_console\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Mapped, Session, sessionmaker\n\nfrom litestar.contrib.sqlalchemy.base import UUIDBase\nfrom litestar.contrib.sqlalchemy.repository import SQLAlchemySyncRepository\nfrom litestar.repository.filters import LimitOffset\n\nhere = Path(__file__).parent\nconsole = get_console()\n\n\nclass USState(UUIDBase):\n # you can optionally override the generated table name by manually setting it.\n __tablename__ = \"us_state_lookup\" # type: ignore[assignment]\n abbreviation: Mapped[str]\n name: Mapped[str]\n\n\nclass USStateRepository(SQLAlchemySyncRepository[USState]):\n \"\"\"US State repository.\"\"\"\n\n model_type = USState\n\n\nengine = create_engine(\n \"duckdb:///:memory:\",\n future=True,\n)\nsession_factory: sessionmaker[Session] = sessionmaker(engine, expire_on_commit=False)\n\n\ndef open_fixture(fixtures_path: Path, fixture_name: str) -> Any:\n \"\"\"Loads JSON file with the specified fixture name\n\n Args:\n fixtures_path (Path): The path to look for fixtures\n fixture_name (str): The fixture name to load.\n\n Raises:\n FileNotFoundError: Fixtures not found.\n\n Returns:\n Any: The parsed JSON data\n \"\"\"\n fixture = Path(fixtures_path / f\"{fixture_name}.json\")\n if fixture.exists():\n with fixture.open(mode=\"r\", encoding=\"utf-8\") as f:\n f_data = f.read()\n return json.loads(f_data)\n raise FileNotFoundError(f\"Could not find the {fixture_name} fixture\")\n\n\ndef run_script() -> None:\n \"\"\"Load data from a fixture.\"\"\"\n\n # Initializes the database.\n with engine.begin() as conn:\n USState.metadata.create_all(conn)\n\n with session_factory() as db_session:\n # 1) load the JSON data into the US States table\n repo = USStateRepository(session=db_session)\n fixture = open_fixture(here, USStateRepository.model_type.__tablename__) # type: ignore\n objs = repo.add_many([USStateRepository.model_type(**raw_obj) for raw_obj in fixture])\n db_session.commit()\n console.print(f\"Created {len(objs)} new objects.\")\n\n # 2) Select paginated data and total row count.\n created_objs, total_objs = repo.list_and_count(LimitOffset(limit=10, offset=0))\n console.print(f\"Selected {len(created_objs)} records out of a total of {total_objs}.\")\n\n # 2) Let's remove the batch of records selected.\n deleted_objs = repo.delete_many([new_obj.id for new_obj in created_objs])\n console.print(f\"Removed {len(deleted_objs)} records out of a total of {total_objs}.\")\n\n # 3) Le'ts count the remaining rows\n remaining_count = repo.count()\n console.print(f\"Found {remaining_count} remaining records after delete.\")\n\n\nif __name__ == \"__main__\":\n run_script()\n", "path": "docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py"}], "after_files": [{"content": "import json\nfrom pathlib import Path\nfrom typing import Any\n\nfrom rich import get_console\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import Mapped, Session, sessionmaker\n\nfrom litestar.contrib.sqlalchemy.base import UUIDBase\nfrom litestar.contrib.sqlalchemy.repository import SQLAlchemySyncRepository\nfrom litestar.repository.filters import LimitOffset\n\nhere = Path(__file__).parent\nconsole = 
get_console()\n\n\nclass USState(UUIDBase):\n # you can optionally override the generated table name by manually setting it.\n __tablename__ = \"us_state_lookup\" # type: ignore[assignment]\n abbreviation: Mapped[str]\n name: Mapped[str]\n\n\nclass USStateRepository(SQLAlchemySyncRepository[USState]):\n \"\"\"US State repository.\"\"\"\n\n model_type = USState\n\n\nengine = create_engine(\n \"duckdb:///:memory:\",\n future=True,\n)\nsession_factory: sessionmaker[Session] = sessionmaker(engine, expire_on_commit=False)\n\n\ndef open_fixture(fixtures_path: Path, fixture_name: str) -> Any:\n \"\"\"Loads JSON file with the specified fixture name\n\n Args:\n fixtures_path (Path): The path to look for fixtures\n fixture_name (str): The fixture name to load.\n\n Raises:\n FileNotFoundError: Fixtures not found.\n\n Returns:\n Any: The parsed JSON data\n \"\"\"\n fixture = Path(fixtures_path / f\"{fixture_name}.json\")\n if fixture.exists():\n with fixture.open(mode=\"r\", encoding=\"utf-8\") as f:\n f_data = f.read()\n return json.loads(f_data)\n raise FileNotFoundError(f\"Could not find the {fixture_name} fixture\")\n\n\ndef run_script() -> None:\n \"\"\"Load data from a fixture.\"\"\"\n\n # Initializes the database.\n with engine.begin() as conn:\n USState.metadata.create_all(conn)\n\n with session_factory() as db_session:\n # 1) Load the JSON data into the US States table.\n repo = USStateRepository(session=db_session)\n fixture = open_fixture(here, USStateRepository.model_type.__tablename__) # type: ignore\n objs = repo.add_many([USStateRepository.model_type(**raw_obj) for raw_obj in fixture])\n db_session.commit()\n console.print(f\"Created {len(objs)} new objects.\")\n\n # 2) Select paginated data and total row count.\n created_objs, total_objs = repo.list_and_count(LimitOffset(limit=10, offset=0))\n console.print(f\"Selected {len(created_objs)} records out of a total of {total_objs}.\")\n\n # 3) Let's remove the batch of records selected.\n deleted_objs = repo.delete_many([new_obj.id for new_obj in created_objs])\n console.print(f\"Removed {len(deleted_objs)} records out of a total of {total_objs}.\")\n\n # 4) Let's count the remaining rows\n remaining_count = repo.count()\n console.print(f\"Found {remaining_count} remaining records after delete.\")\n\n\nif __name__ == \"__main__\":\n run_script()\n", "path": "docs/examples/contrib/sqlalchemy/sqlalchemy_repository_bulk_operations.py"}]} | 1,263 | 357 |
gh_patches_debug_25431 | rasdani/github-patches | git_diff | lutris__lutris-994 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Auto update latest DXVK versions
https://github.com/lutris/lutris/blob/525c84d9da173e84fd5585eed6b9d2fef5bef2b2/lutris/util/dxvk.py#L11-L12
Instead of writing static versions, use GitHub API to get the latest versions of DXVK:
```bash
curl -s https://api.github.com/repos/doitsujin/dxvk/tags | jq '[.[] | .name] | join(", ")'
"v0.62, v0.61, v0.60, v0.54, v0.53, v0.52, v0.51, v0.50, v0.42, v0.41, v0.40, v0.31, v0.30, v0.21, v0.20"
```
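For illustration (an editor-added sketch, not part of the original report): the same lookup can be done from Python with only the standard library, assuming the public GitHub tags endpoint shown in the curl example above. It deliberately omits the on-disk caching and error handling a real implementation would want.

```python
import json
import urllib.request

def fetch_dxvk_versions():
    """Return (latest, past_releases) DXVK version strings from the GitHub tags API."""
    url = "https://api.github.com/repos/doitsujin/dxvk/tags"
    with urllib.request.urlopen(url) as response:
        tags = json.load(response)
    # Tag names look like "v0.62"; strip the leading "v" to match the "0.52"-style constants.
    versions = [tag["name"].lstrip("v") for tag in tags]
    return versions[0], versions[1:]
```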
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/util/dxvk.py`
Content:
```
1 """DXVK helper module"""
2 import os
3 import time
4 import shutil
5
6 from lutris.settings import RUNTIME_DIR
7 from lutris.util.log import logger
8 from lutris.util.extract import extract_archive
9 from lutris.util.downloader import Downloader
10
11 DXVK_LATEST = "0.52"
12 DXVK_PAST_RELEASES = ["0.51", "0.50", "0.42", "0.31", "0.21"]
13
14
15 class DXVKManager:
16 """Utility class to install DXVK dlls to a Wine prefix"""
17 base_url = "https://github.com/doitsujin/dxvk/releases/download/v{}/dxvk-{}.tar.gz"
18 base_dir = os.path.join(RUNTIME_DIR, 'dxvk')
19 dxvk_dlls = ('dxgi', 'd3d11')
20 latest_version = DXVK_LATEST
21
22 def __init__(self, prefix, arch='win64', version=None):
23 self.prefix = prefix
24 if not os.path.isdir(self.base_dir):
25 os.makedirs(self.base_dir)
26 self._version = version
27 self.wine_arch = arch
28
29 @property
30 def version(self):
31 """Return version of DXVK (latest known version if not provided)"""
32 if self._version:
33 return self._version
34 return self.latest_version
35
36 @property
37 def dxvk_path(self):
38 """Return path to DXVK local cache"""
39 return os.path.join(self.base_dir, self.version)
40
41 @staticmethod
42 def is_dxvk_dll(dll_path):
43 """Check if a given DLL path is provided by DXVK
44
45 Very basic check to see if a dll exists and is over 1MB. If this is the
46 case, then consider the DLL to be from DXVK
47 """
48 if os.path.exists(dll_path):
49 dll_stats = os.stat(dll_path)
50 dll_size = dll_stats.st_size
51 else:
52 dll_size = 0
53 return dll_size > 1024 * 1024
54
55 def is_available(self):
56 """Return whether DXVK is cached locally"""
57 return os.path.exists(self.dxvk_path)
58
59 def download(self):
60 """Download DXVK to the local cache"""
61 # There's a glitch in one of the archive's names
62 fixed_version = 'v0.40' if self.version == '0.40' else self.version
63 dxvk_url = self.base_url.format(self.version, fixed_version)
64 if self.is_available():
65 logger.warning("DXVK already available at %s", self.dxvk_path)
66
67 dxvk_archive_path = os.path.join(self.base_dir, os.path.basename(dxvk_url))
68 downloader = Downloader(dxvk_url, dxvk_archive_path)
69 downloader.start()
70 while downloader.check_progress() < 1:
71 time.sleep(1)
72 if not os.path.exists(dxvk_archive_path):
73 logger.error("DXVK %s not downloaded")
74 return
75 if os.stat(dxvk_archive_path).st_size:
76 extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)
77 else:
78 logger.error("%s is an empty file", self.dxvk_path)
79 os.remove(dxvk_archive_path)
80
81 def enable_dxvk_dll(self, system_dir, dxvk_arch, dll):
82 """Copies DXVK dlls to the appropriate destination"""
83 wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)
84 logger.info("Replacing %s/%s with DXVK version", system_dir, dll)
85 if not self.is_dxvk_dll(wine_dll_path):
86 # Backing up original version (may not be needed)
87 if os.path.exists(wine_dll_path):
88 shutil.move(wine_dll_path, wine_dll_path + ".orig")
89 # Copying DXVK's version
90 dxvk_dll_path = os.path.join(self.dxvk_path, dxvk_arch, "%s.dll" % dll)
91 shutil.copy(dxvk_dll_path, wine_dll_path)
92
93 def disable_dxvk_dll(self, system_dir, dxvk_arch, dll):
94 """Remove DXVK DLL from Wine prefix"""
95 wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)
96 if self.is_dxvk_dll(wine_dll_path):
97 logger.info("Removing DXVK dll %s/%s", system_dir, dll)
98 os.remove(wine_dll_path)
99 # Restoring original version (may not be needed)
100 if os.path.exists(wine_dll_path + '.orig'):
101 shutil.move(wine_dll_path + '.orig', wine_dll_path)
102
103 def _iter_dxvk_dlls(self):
104 windows_path = os.path.join(self.prefix, 'drive_c/windows')
105 if self.wine_arch == 'win64':
106 system_dirs = {
107 'x64': os.path.join(windows_path, 'system32'),
108 'x32': os.path.join(windows_path, 'syswow64')
109 }
110 elif self.wine_arch == 'win32':
111 system_dirs = {
112 'x32': os.path.join(windows_path, 'system32'),
113 }
114
115 for dxvk_arch, system_dir in system_dirs.items():
116 for dll in self.dxvk_dlls:
117 yield system_dir, dxvk_arch, dll
118
119 def enable(self):
120 """Enable DXVK for the current prefix"""
121 if not os.path.exists(self.dxvk_path):
122 logger.error("DXVK %s is not availble locally" % self.version)
123 return
124 for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():
125 self.enable_dxvk_dll(system_dir, dxvk_arch, dll)
126
127 def disable(self):
128 """Disable DXVK for the current prefix"""
129 for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():
130 self.disable_dxvk_dll(system_dir, dxvk_arch, dll)
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/util/dxvk.py b/lutris/util/dxvk.py
--- a/lutris/util/dxvk.py
+++ b/lutris/util/dxvk.py
@@ -1,16 +1,41 @@
"""DXVK helper module"""
import os
+import json
import time
import shutil
+import urllib.request
from lutris.settings import RUNTIME_DIR
from lutris.util.log import logger
from lutris.util.extract import extract_archive
from lutris.util.downloader import Downloader
-DXVK_LATEST = "0.52"
-DXVK_PAST_RELEASES = ["0.51", "0.50", "0.42", "0.31", "0.21"]
+def get_latest_dxvk_versions():
+ """Get latest DXVK versions from GitHub"""
+ dxvk_url = "https://api.github.com/repos/doitsujin/dxvk/tags"
+ cache = os.path.join(RUNTIME_DIR, 'dxvk', 'dxvk_versions.json')
+
+ # If the DXVK cache does not exist then download it
+ if not os.path.exists(cache):
+ urllib.request.urlretrieve(dxvk_url, cache)
+
+ # Re-download DXVK versions cache if more than a day old
+ if os.path.getmtime(cache)+86400 < time.time():
+ urllib.request.urlretrieve(dxvk_url, cache)
+
+ with open(cache, "r") as f:
+ dxvk_json = json.load(f)
+ DXVK_LATEST = dxvk_json[0]['name'].replace('v','')
+ DXVK_PAST_RELEASES = [x['name'].replace('v', '') for x in dxvk_json][1:]
+
+ return DXVK_LATEST, DXVK_PAST_RELEASES
+
+try:
+ DXVK_LATEST, DXVK_PAST_RELEASES = get_latest_dxvk_versions()
+except:
+ DXVK_LATEST = "0.52"
+ DXVK_PAST_RELEASES = ["0.51", "0.50", "0.42", "0.31", "0.21"]
class DXVKManager:
"""Utility class to install DXVK dlls to a Wine prefix"""
| {"golden_diff": "diff --git a/lutris/util/dxvk.py b/lutris/util/dxvk.py\n--- a/lutris/util/dxvk.py\n+++ b/lutris/util/dxvk.py\n@@ -1,16 +1,41 @@\n \"\"\"DXVK helper module\"\"\"\n import os\n+import json\n import time\n import shutil\n+import urllib.request\n \n from lutris.settings import RUNTIME_DIR\n from lutris.util.log import logger\n from lutris.util.extract import extract_archive\n from lutris.util.downloader import Downloader\n \n-DXVK_LATEST = \"0.52\"\n-DXVK_PAST_RELEASES = [\"0.51\", \"0.50\", \"0.42\", \"0.31\", \"0.21\"]\n \n+def get_latest_dxvk_versions():\n+ \"\"\"Get latest DXVK versions from GitHub\"\"\"\n+ dxvk_url = \"https://api.github.com/repos/doitsujin/dxvk/tags\"\n+ cache = os.path.join(RUNTIME_DIR, 'dxvk', 'dxvk_versions.json')\n+\n+ # If the DXVK cache does not exist then download it\n+ if not os.path.exists(cache):\n+ urllib.request.urlretrieve(dxvk_url, cache)\n+\n+ # Re-download DXVK versions cache if more than a day old\n+ if os.path.getmtime(cache)+86400 < time.time():\n+ urllib.request.urlretrieve(dxvk_url, cache)\n+\n+ with open(cache, \"r\") as f:\n+ dxvk_json = json.load(f)\n+ DXVK_LATEST = dxvk_json[0]['name'].replace('v','')\n+ DXVK_PAST_RELEASES = [x['name'].replace('v', '') for x in dxvk_json][1:]\n+\n+ return DXVK_LATEST, DXVK_PAST_RELEASES\n+\n+try:\n+ DXVK_LATEST, DXVK_PAST_RELEASES = get_latest_dxvk_versions()\n+except:\n+ DXVK_LATEST = \"0.52\"\n+ DXVK_PAST_RELEASES = [\"0.51\", \"0.50\", \"0.42\", \"0.31\", \"0.21\"]\n \n class DXVKManager:\n \"\"\"Utility class to install DXVK dlls to a Wine prefix\"\"\"\n", "issue": "Auto update latest DXVK versions\nhttps://github.com/lutris/lutris/blob/525c84d9da173e84fd5585eed6b9d2fef5bef2b2/lutris/util/dxvk.py#L11-L12\r\n\r\nInstead of writing static versions, use GitHub API to get the latest versions of DXVK:\r\n```bash\r\ncurl -s https://api.github.com/repos/doitsujin/dxvk/tags | jq '[.[] | .name] | join(\", \")'\r\n\"v0.62, v0.61, v0.60, v0.54, v0.53, v0.52, v0.51, v0.50, v0.42, v0.41, v0.40, v0.31, v0.30, v0.21, v0.20\"\r\n```\n", "before_files": [{"content": "\"\"\"DXVK helper module\"\"\"\nimport os\nimport time\nimport shutil\n\nfrom lutris.settings import RUNTIME_DIR\nfrom lutris.util.log import logger\nfrom lutris.util.extract import extract_archive\nfrom lutris.util.downloader import Downloader\n\nDXVK_LATEST = \"0.52\"\nDXVK_PAST_RELEASES = [\"0.51\", \"0.50\", \"0.42\", \"0.31\", \"0.21\"]\n\n\nclass DXVKManager:\n \"\"\"Utility class to install DXVK dlls to a Wine prefix\"\"\"\n base_url = \"https://github.com/doitsujin/dxvk/releases/download/v{}/dxvk-{}.tar.gz\"\n base_dir = os.path.join(RUNTIME_DIR, 'dxvk')\n dxvk_dlls = ('dxgi', 'd3d11')\n latest_version = DXVK_LATEST\n\n def __init__(self, prefix, arch='win64', version=None):\n self.prefix = prefix\n if not os.path.isdir(self.base_dir):\n os.makedirs(self.base_dir)\n self._version = version\n self.wine_arch = arch\n\n @property\n def version(self):\n \"\"\"Return version of DXVK (latest known version if not provided)\"\"\"\n if self._version:\n return self._version\n return self.latest_version\n\n @property\n def dxvk_path(self):\n \"\"\"Return path to DXVK local cache\"\"\"\n return os.path.join(self.base_dir, self.version)\n\n @staticmethod\n def is_dxvk_dll(dll_path):\n \"\"\"Check if a given DLL path is provided by DXVK\n\n Very basic check to see if a dll exists and is over 1MB. 
If this is the\n case, then consider the DLL to be from DXVK\n \"\"\"\n if os.path.exists(dll_path):\n dll_stats = os.stat(dll_path)\n dll_size = dll_stats.st_size\n else:\n dll_size = 0\n return dll_size > 1024 * 1024\n\n def is_available(self):\n \"\"\"Return whether DXVK is cached locally\"\"\"\n return os.path.exists(self.dxvk_path)\n\n def download(self):\n \"\"\"Download DXVK to the local cache\"\"\"\n # There's a glitch in one of the archive's names\n fixed_version = 'v0.40' if self.version == '0.40' else self.version\n dxvk_url = self.base_url.format(self.version, fixed_version)\n if self.is_available():\n logger.warning(\"DXVK already available at %s\", self.dxvk_path)\n\n dxvk_archive_path = os.path.join(self.base_dir, os.path.basename(dxvk_url))\n downloader = Downloader(dxvk_url, dxvk_archive_path)\n downloader.start()\n while downloader.check_progress() < 1:\n time.sleep(1)\n if not os.path.exists(dxvk_archive_path):\n logger.error(\"DXVK %s not downloaded\")\n return\n if os.stat(dxvk_archive_path).st_size:\n extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)\n else:\n logger.error(\"%s is an empty file\", self.dxvk_path)\n os.remove(dxvk_archive_path)\n\n def enable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Copies DXVK dlls to the appropriate destination\"\"\"\n wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)\n logger.info(\"Replacing %s/%s with DXVK version\", system_dir, dll)\n if not self.is_dxvk_dll(wine_dll_path):\n # Backing up original version (may not be needed)\n if os.path.exists(wine_dll_path):\n shutil.move(wine_dll_path, wine_dll_path + \".orig\")\n # Copying DXVK's version\n dxvk_dll_path = os.path.join(self.dxvk_path, dxvk_arch, \"%s.dll\" % dll)\n shutil.copy(dxvk_dll_path, wine_dll_path)\n\n def disable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Remove DXVK DLL from Wine prefix\"\"\"\n wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)\n if self.is_dxvk_dll(wine_dll_path):\n logger.info(\"Removing DXVK dll %s/%s\", system_dir, dll)\n os.remove(wine_dll_path)\n # Restoring original version (may not be needed)\n if os.path.exists(wine_dll_path + '.orig'):\n shutil.move(wine_dll_path + '.orig', wine_dll_path)\n\n def _iter_dxvk_dlls(self):\n windows_path = os.path.join(self.prefix, 'drive_c/windows')\n if self.wine_arch == 'win64':\n system_dirs = {\n 'x64': os.path.join(windows_path, 'system32'),\n 'x32': os.path.join(windows_path, 'syswow64')\n }\n elif self.wine_arch == 'win32':\n system_dirs = {\n 'x32': os.path.join(windows_path, 'system32'),\n }\n\n for dxvk_arch, system_dir in system_dirs.items():\n for dll in self.dxvk_dlls:\n yield system_dir, dxvk_arch, dll\n\n def enable(self):\n \"\"\"Enable DXVK for the current prefix\"\"\"\n if not os.path.exists(self.dxvk_path):\n logger.error(\"DXVK %s is not availble locally\" % self.version)\n return\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.enable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n def disable(self):\n \"\"\"Disable DXVK for the current prefix\"\"\"\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.disable_dxvk_dll(system_dir, dxvk_arch, dll)\n", "path": "lutris/util/dxvk.py"}], "after_files": [{"content": "\"\"\"DXVK helper module\"\"\"\nimport os\nimport json\nimport time\nimport shutil\nimport urllib.request\n\nfrom lutris.settings import RUNTIME_DIR\nfrom lutris.util.log import logger\nfrom lutris.util.extract import extract_archive\nfrom lutris.util.downloader import Downloader\n\n\ndef 
get_latest_dxvk_versions():\n \"\"\"Get latest DXVK versions from GitHub\"\"\"\n dxvk_url = \"https://api.github.com/repos/doitsujin/dxvk/tags\"\n cache = os.path.join(RUNTIME_DIR, 'dxvk', 'dxvk_versions.json')\n\n # If the DXVK cache does not exist then download it\n if not os.path.exists(cache):\n urllib.request.urlretrieve(dxvk_url, cache)\n\n # Re-download DXVK versions cache if more than a day old\n if os.path.getmtime(cache)+86400 < time.time():\n urllib.request.urlretrieve(dxvk_url, cache)\n\n with open(cache, \"r\") as f:\n dxvk_json = json.load(f)\n DXVK_LATEST = dxvk_json[0]['name'].replace('v','')\n DXVK_PAST_RELEASES = [x['name'].replace('v', '') for x in dxvk_json][1:]\n\n return DXVK_LATEST, DXVK_PAST_RELEASES\n\ntry:\n DXVK_LATEST, DXVK_PAST_RELEASES = get_latest_dxvk_versions()\nexcept:\n DXVK_LATEST = \"0.52\"\n DXVK_PAST_RELEASES = [\"0.51\", \"0.50\", \"0.42\", \"0.31\", \"0.21\"]\n\nclass DXVKManager:\n \"\"\"Utility class to install DXVK dlls to a Wine prefix\"\"\"\n base_url = \"https://github.com/doitsujin/dxvk/releases/download/v{}/dxvk-{}.tar.gz\"\n base_dir = os.path.join(RUNTIME_DIR, 'dxvk')\n dxvk_dlls = ('dxgi', 'd3d11')\n latest_version = DXVK_LATEST\n\n def __init__(self, prefix, arch='win64', version=None):\n self.prefix = prefix\n if not os.path.isdir(self.base_dir):\n os.makedirs(self.base_dir)\n self._version = version\n self.wine_arch = arch\n\n @property\n def version(self):\n \"\"\"Return version of DXVK (latest known version if not provided)\"\"\"\n if self._version:\n return self._version\n return self.latest_version\n\n @property\n def dxvk_path(self):\n \"\"\"Return path to DXVK local cache\"\"\"\n return os.path.join(self.base_dir, self.version)\n\n @staticmethod\n def is_dxvk_dll(dll_path):\n \"\"\"Check if a given DLL path is provided by DXVK\n\n Very basic check to see if a dll exists and is over 1MB. 
If this is the\n case, then consider the DLL to be from DXVK\n \"\"\"\n if os.path.exists(dll_path):\n dll_stats = os.stat(dll_path)\n dll_size = dll_stats.st_size\n else:\n dll_size = 0\n return dll_size > 1024 * 1024\n\n def is_available(self):\n \"\"\"Return whether DXVK is cached locally\"\"\"\n return os.path.exists(self.dxvk_path)\n\n def download(self):\n \"\"\"Download DXVK to the local cache\"\"\"\n # There's a glitch in one of the archive's names\n fixed_version = 'v0.40' if self.version == '0.40' else self.version\n dxvk_url = self.base_url.format(self.version, fixed_version)\n if self.is_available():\n logger.warning(\"DXVK already available at %s\", self.dxvk_path)\n\n dxvk_archive_path = os.path.join(self.base_dir, os.path.basename(dxvk_url))\n downloader = Downloader(dxvk_url, dxvk_archive_path)\n downloader.start()\n while downloader.check_progress() < 1:\n time.sleep(1)\n if not os.path.exists(dxvk_archive_path):\n logger.error(\"DXVK %s not downloaded\")\n return\n if os.stat(dxvk_archive_path).st_size:\n extract_archive(dxvk_archive_path, self.dxvk_path, merge_single=True)\n else:\n logger.error(\"%s is an empty file\", self.dxvk_path)\n os.remove(dxvk_archive_path)\n\n def enable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Copies DXVK dlls to the appropriate destination\"\"\"\n wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)\n logger.info(\"Replacing %s/%s with DXVK version\", system_dir, dll)\n if not self.is_dxvk_dll(wine_dll_path):\n # Backing up original version (may not be needed)\n if os.path.exists(wine_dll_path):\n shutil.move(wine_dll_path, wine_dll_path + \".orig\")\n # Copying DXVK's version\n dxvk_dll_path = os.path.join(self.dxvk_path, dxvk_arch, \"%s.dll\" % dll)\n shutil.copy(dxvk_dll_path, wine_dll_path)\n\n def disable_dxvk_dll(self, system_dir, dxvk_arch, dll):\n \"\"\"Remove DXVK DLL from Wine prefix\"\"\"\n wine_dll_path = os.path.join(system_dir, '%s.dll' % dll)\n if self.is_dxvk_dll(wine_dll_path):\n logger.info(\"Removing DXVK dll %s/%s\", system_dir, dll)\n os.remove(wine_dll_path)\n # Restoring original version (may not be needed)\n if os.path.exists(wine_dll_path + '.orig'):\n shutil.move(wine_dll_path + '.orig', wine_dll_path)\n\n def _iter_dxvk_dlls(self):\n windows_path = os.path.join(self.prefix, 'drive_c/windows')\n if self.wine_arch == 'win64':\n system_dirs = {\n 'x64': os.path.join(windows_path, 'system32'),\n 'x32': os.path.join(windows_path, 'syswow64')\n }\n elif self.wine_arch == 'win32':\n system_dirs = {\n 'x32': os.path.join(windows_path, 'system32'),\n }\n\n for dxvk_arch, system_dir in system_dirs.items():\n for dll in self.dxvk_dlls:\n yield system_dir, dxvk_arch, dll\n\n def enable(self):\n \"\"\"Enable DXVK for the current prefix\"\"\"\n if not os.path.exists(self.dxvk_path):\n logger.error(\"DXVK %s is not availble locally\" % self.version)\n return\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.enable_dxvk_dll(system_dir, dxvk_arch, dll)\n\n def disable(self):\n \"\"\"Disable DXVK for the current prefix\"\"\"\n for system_dir, dxvk_arch, dll in self._iter_dxvk_dlls():\n self.disable_dxvk_dll(system_dir, dxvk_arch, dll)\n", "path": "lutris/util/dxvk.py"}]} | 2,039 | 501 |
gh_patches_debug_5624 | rasdani/github-patches | git_diff | Textualize__textual-2981 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`Switch` should stop the `Click` event from bubbling
At the moment `Switch` handles `Click` but then lets it bubble; there's no good reason to do that, and it also prevents a pattern like the following from working:
```python
from textual.app import App, ComposeResult
from textual.containers import Horizontal
from textual.widgets import Header, Footer, Label, Switch
class LabeledSwitch( Horizontal ):
def on_click( self ) -> None:
self.query_one(Switch).toggle()
class ClickableLabelApp( App[ None ] ):
def compose( self ) -> ComposeResult:
yield Header()
with LabeledSwitch():
yield Label( "Click me!" )
yield Switch()
yield Footer()
if __name__ == "__main__":
ClickableLabelApp().run()
```
where the idea is to make a compound widget that lets you click on the `Label` or the `Switch` and the `Switch` will toggle -- only it doesn't work if you click on the `Switch` because it ends up double-toggling.
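A minimal sketch of the fix being asked for — have `Switch` consume the `Click` so it never reaches the parent. This is an editor illustration, not code from the report; it assumes the private `_on_click` handler and the `toggle()` method shown in the widget source below.

```python
from textual import events
from textual.widgets import Switch

class PatchedSwitch(Switch):
    async def _on_click(self, event: events.Click) -> None:
        event.stop()  # consume the Click so LabeledSwitch.on_click never fires a second toggle
        self.toggle()
```

With this subclass used in place of `Switch`, clicking either the `Label` or the switch itself toggles exactly once.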
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_switch.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING, ClassVar
4
5 from rich.console import RenderableType
6
7 from ..binding import Binding, BindingType
8 from ..events import Click
9 from ..geometry import Size
10 from ..message import Message
11 from ..reactive import reactive
12 from ..scrollbar import ScrollBarRender
13 from ..widget import Widget
14
15 if TYPE_CHECKING:
16 from typing_extensions import Self
17
18
19 class Switch(Widget, can_focus=True):
20 """A switch widget that represents a boolean value.
21
22 Can be toggled by clicking on it or through its [bindings][textual.widgets.Switch.BINDINGS].
23
24 The switch widget also contains [component classes][textual.widgets.Switch.COMPONENT_CLASSES]
25 that enable more customization.
26 """
27
28 BINDINGS: ClassVar[list[BindingType]] = [
29 Binding("enter,space", "toggle", "Toggle", show=False),
30 ]
31 """
32 | Key(s) | Description |
33 | :- | :- |
34 | enter,space | Toggle the switch state. |
35 """
36
37 COMPONENT_CLASSES: ClassVar[set[str]] = {
38 "switch--slider",
39 }
40 """
41 | Class | Description |
42 | :- | :- |
43 | `switch--slider` | Targets the slider of the switch. |
44 """
45
46 DEFAULT_CSS = """
47 Switch {
48 border: tall transparent;
49 background: $boost;
50 height: auto;
51 width: auto;
52 padding: 0 2;
53 }
54
55 Switch > .switch--slider {
56 background: $panel-darken-2;
57 color: $panel-lighten-2;
58 }
59
60 Switch:hover {
61 border: tall $background;
62 }
63
64 Switch:focus {
65 border: tall $accent;
66 }
67
68 Switch.-on {
69
70 }
71
72 Switch.-on > .switch--slider {
73 color: $success;
74 }
75 """
76
77 value = reactive(False, init=False)
78 """The value of the switch; `True` for on and `False` for off."""
79
80 slider_pos = reactive(0.0)
81 """The position of the slider."""
82
83 class Changed(Message, bubble=True):
84 """Posted when the status of the switch changes.
85
86 Can be handled using `on_switch_changed` in a subclass of `Switch`
87 or in a parent widget in the DOM.
88
89 Attributes:
90 value: The value that the switch was changed to.
91 switch: The `Switch` widget that was changed.
92 """
93
94 def __init__(self, switch: Switch, value: bool) -> None:
95 super().__init__()
96 self.value: bool = value
97 self.switch: Switch = switch
98
99 @property
100 def control(self) -> Switch:
101 """Alias for self.switch."""
102 return self.switch
103
104 def __init__(
105 self,
106 value: bool = False,
107 *,
108 animate: bool = True,
109 name: str | None = None,
110 id: str | None = None,
111 classes: str | None = None,
112 disabled: bool = False,
113 ):
114 """Initialise the switch.
115
116 Args:
117 value: The initial value of the switch.
118 animate: True if the switch should animate when toggled.
119 name: The name of the switch.
120 id: The ID of the switch in the DOM.
121 classes: The CSS classes of the switch.
122 disabled: Whether the switch is disabled or not.
123 """
124 super().__init__(name=name, id=id, classes=classes, disabled=disabled)
125 if value:
126 self.slider_pos = 1.0
127 self._reactive_value = value
128 self._should_animate = animate
129
130 def watch_value(self, value: bool) -> None:
131 target_slider_pos = 1.0 if value else 0.0
132 if self._should_animate:
133 self.animate("slider_pos", target_slider_pos, duration=0.3)
134 else:
135 self.slider_pos = target_slider_pos
136 self.post_message(self.Changed(self, self.value))
137
138 def watch_slider_pos(self, slider_pos: float) -> None:
139 self.set_class(slider_pos == 1, "-on")
140
141 def render(self) -> RenderableType:
142 style = self.get_component_rich_style("switch--slider")
143 return ScrollBarRender(
144 virtual_size=100,
145 window_size=50,
146 position=self.slider_pos * 50,
147 style=style,
148 vertical=False,
149 )
150
151 def get_content_width(self, container: Size, viewport: Size) -> int:
152 return 4
153
154 def get_content_height(self, container: Size, viewport: Size, width: int) -> int:
155 return 1
156
157 async def _on_click(self, _: Click) -> None:
158 """Toggle the state of the switch."""
159 self.toggle()
160
161 def action_toggle(self) -> None:
162 """Toggle the state of the switch."""
163 self.toggle()
164
165 def toggle(self) -> Self:
166 """Toggle the switch value.
167
168 As a result of the value changing, a `Switch.Changed` message will
169 be posted.
170
171 Returns:
172 The `Switch` instance.
173 """
174 self.value = not self.value
175 return self
176
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/widgets/_switch.py b/src/textual/widgets/_switch.py
--- a/src/textual/widgets/_switch.py
+++ b/src/textual/widgets/_switch.py
@@ -154,8 +154,9 @@
def get_content_height(self, container: Size, viewport: Size, width: int) -> int:
return 1
- async def _on_click(self, _: Click) -> None:
+ async def _on_click(self, event: Click) -> None:
"""Toggle the state of the switch."""
+ event.stop()
self.toggle()
def action_toggle(self) -> None:
| {"golden_diff": "diff --git a/src/textual/widgets/_switch.py b/src/textual/widgets/_switch.py\n--- a/src/textual/widgets/_switch.py\n+++ b/src/textual/widgets/_switch.py\n@@ -154,8 +154,9 @@\n def get_content_height(self, container: Size, viewport: Size, width: int) -> int:\n return 1\n \n- async def _on_click(self, _: Click) -> None:\n+ async def _on_click(self, event: Click) -> None:\n \"\"\"Toggle the state of the switch.\"\"\"\n+ event.stop()\n self.toggle()\n \n def action_toggle(self) -> None:\n", "issue": "`Switch` should stop the `Click` event from bubbling\nAt the moment `Switch` handles `Click` but then lets it bubble; there's no good reason to do that and it also stops the ability to write something like this:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Horizontal\r\nfrom textual.widgets import Header, Footer, Label, Switch\r\n\r\nclass LabeledSwitch( Horizontal ):\r\n\r\n def on_click( self ) -> None:\r\n self.query_one(Switch).toggle()\r\n\r\nclass ClickableLabelApp( App[ None ] ):\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n with LabeledSwitch():\r\n yield Label( \"Click me!\" )\r\n yield Switch()\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n ClickableLabelApp().run()\r\n```\r\n\r\nwhere the idea is to make a compound widget that lets you click on the `Label` or the `Switch` and the `Switch` will toggle -- only it doesn't work if you click on the `Switch` because it ends up double-toggling.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, ClassVar\n\nfrom rich.console import RenderableType\n\nfrom ..binding import Binding, BindingType\nfrom ..events import Click\nfrom ..geometry import Size\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..scrollbar import ScrollBarRender\nfrom ..widget import Widget\n\nif TYPE_CHECKING:\n from typing_extensions import Self\n\n\nclass Switch(Widget, can_focus=True):\n \"\"\"A switch widget that represents a boolean value.\n\n Can be toggled by clicking on it or through its [bindings][textual.widgets.Switch.BINDINGS].\n\n The switch widget also contains [component classes][textual.widgets.Switch.COMPONENT_CLASSES]\n that enable more customization.\n \"\"\"\n\n BINDINGS: ClassVar[list[BindingType]] = [\n Binding(\"enter,space\", \"toggle\", \"Toggle\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter,space | Toggle the switch state. |\n \"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\n \"switch--slider\",\n }\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `switch--slider` | Targets the slider of the switch. 
|\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Switch {\n border: tall transparent;\n background: $boost;\n height: auto;\n width: auto;\n padding: 0 2;\n }\n\n Switch > .switch--slider {\n background: $panel-darken-2;\n color: $panel-lighten-2;\n }\n\n Switch:hover {\n border: tall $background;\n }\n\n Switch:focus {\n border: tall $accent;\n }\n\n Switch.-on {\n\n }\n\n Switch.-on > .switch--slider {\n color: $success;\n }\n \"\"\"\n\n value = reactive(False, init=False)\n \"\"\"The value of the switch; `True` for on and `False` for off.\"\"\"\n\n slider_pos = reactive(0.0)\n \"\"\"The position of the slider.\"\"\"\n\n class Changed(Message, bubble=True):\n \"\"\"Posted when the status of the switch changes.\n\n Can be handled using `on_switch_changed` in a subclass of `Switch`\n or in a parent widget in the DOM.\n\n Attributes:\n value: The value that the switch was changed to.\n switch: The `Switch` widget that was changed.\n \"\"\"\n\n def __init__(self, switch: Switch, value: bool) -> None:\n super().__init__()\n self.value: bool = value\n self.switch: Switch = switch\n\n @property\n def control(self) -> Switch:\n \"\"\"Alias for self.switch.\"\"\"\n return self.switch\n\n def __init__(\n self,\n value: bool = False,\n *,\n animate: bool = True,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Initialise the switch.\n\n Args:\n value: The initial value of the switch.\n animate: True if the switch should animate when toggled.\n name: The name of the switch.\n id: The ID of the switch in the DOM.\n classes: The CSS classes of the switch.\n disabled: Whether the switch is disabled or not.\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n if value:\n self.slider_pos = 1.0\n self._reactive_value = value\n self._should_animate = animate\n\n def watch_value(self, value: bool) -> None:\n target_slider_pos = 1.0 if value else 0.0\n if self._should_animate:\n self.animate(\"slider_pos\", target_slider_pos, duration=0.3)\n else:\n self.slider_pos = target_slider_pos\n self.post_message(self.Changed(self, self.value))\n\n def watch_slider_pos(self, slider_pos: float) -> None:\n self.set_class(slider_pos == 1, \"-on\")\n\n def render(self) -> RenderableType:\n style = self.get_component_rich_style(\"switch--slider\")\n return ScrollBarRender(\n virtual_size=100,\n window_size=50,\n position=self.slider_pos * 50,\n style=style,\n vertical=False,\n )\n\n def get_content_width(self, container: Size, viewport: Size) -> int:\n return 4\n\n def get_content_height(self, container: Size, viewport: Size, width: int) -> int:\n return 1\n\n async def _on_click(self, _: Click) -> None:\n \"\"\"Toggle the state of the switch.\"\"\"\n self.toggle()\n\n def action_toggle(self) -> None:\n \"\"\"Toggle the state of the switch.\"\"\"\n self.toggle()\n\n def toggle(self) -> Self:\n \"\"\"Toggle the switch value.\n\n As a result of the value changing, a `Switch.Changed` message will\n be posted.\n\n Returns:\n The `Switch` instance.\n \"\"\"\n self.value = not self.value\n return self\n", "path": "src/textual/widgets/_switch.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, ClassVar\n\nfrom rich.console import RenderableType\n\nfrom ..binding import Binding, BindingType\nfrom ..events import Click\nfrom ..geometry import Size\nfrom ..message import Message\nfrom ..reactive import reactive\nfrom ..scrollbar import ScrollBarRender\nfrom ..widget import Widget\n\nif 
TYPE_CHECKING:\n from typing_extensions import Self\n\n\nclass Switch(Widget, can_focus=True):\n \"\"\"A switch widget that represents a boolean value.\n\n Can be toggled by clicking on it or through its [bindings][textual.widgets.Switch.BINDINGS].\n\n The switch widget also contains [component classes][textual.widgets.Switch.COMPONENT_CLASSES]\n that enable more customization.\n \"\"\"\n\n BINDINGS: ClassVar[list[BindingType]] = [\n Binding(\"enter,space\", \"toggle\", \"Toggle\", show=False),\n ]\n \"\"\"\n | Key(s) | Description |\n | :- | :- |\n | enter,space | Toggle the switch state. |\n \"\"\"\n\n COMPONENT_CLASSES: ClassVar[set[str]] = {\n \"switch--slider\",\n }\n \"\"\"\n | Class | Description |\n | :- | :- |\n | `switch--slider` | Targets the slider of the switch. |\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Switch {\n border: tall transparent;\n background: $boost;\n height: auto;\n width: auto;\n padding: 0 2;\n }\n\n Switch > .switch--slider {\n background: $panel-darken-2;\n color: $panel-lighten-2;\n }\n\n Switch:hover {\n border: tall $background;\n }\n\n Switch:focus {\n border: tall $accent;\n }\n\n Switch.-on {\n\n }\n\n Switch.-on > .switch--slider {\n color: $success;\n }\n \"\"\"\n\n value = reactive(False, init=False)\n \"\"\"The value of the switch; `True` for on and `False` for off.\"\"\"\n\n slider_pos = reactive(0.0)\n \"\"\"The position of the slider.\"\"\"\n\n class Changed(Message, bubble=True):\n \"\"\"Posted when the status of the switch changes.\n\n Can be handled using `on_switch_changed` in a subclass of `Switch`\n or in a parent widget in the DOM.\n\n Attributes:\n value: The value that the switch was changed to.\n switch: The `Switch` widget that was changed.\n \"\"\"\n\n def __init__(self, switch: Switch, value: bool) -> None:\n super().__init__()\n self.value: bool = value\n self.switch: Switch = switch\n\n @property\n def control(self) -> Switch:\n \"\"\"Alias for self.switch.\"\"\"\n return self.switch\n\n def __init__(\n self,\n value: bool = False,\n *,\n animate: bool = True,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n disabled: bool = False,\n ):\n \"\"\"Initialise the switch.\n\n Args:\n value: The initial value of the switch.\n animate: True if the switch should animate when toggled.\n name: The name of the switch.\n id: The ID of the switch in the DOM.\n classes: The CSS classes of the switch.\n disabled: Whether the switch is disabled or not.\n \"\"\"\n super().__init__(name=name, id=id, classes=classes, disabled=disabled)\n if value:\n self.slider_pos = 1.0\n self._reactive_value = value\n self._should_animate = animate\n\n def watch_value(self, value: bool) -> None:\n target_slider_pos = 1.0 if value else 0.0\n if self._should_animate:\n self.animate(\"slider_pos\", target_slider_pos, duration=0.3)\n else:\n self.slider_pos = target_slider_pos\n self.post_message(self.Changed(self, self.value))\n\n def watch_slider_pos(self, slider_pos: float) -> None:\n self.set_class(slider_pos == 1, \"-on\")\n\n def render(self) -> RenderableType:\n style = self.get_component_rich_style(\"switch--slider\")\n return ScrollBarRender(\n virtual_size=100,\n window_size=50,\n position=self.slider_pos * 50,\n style=style,\n vertical=False,\n )\n\n def get_content_width(self, container: Size, viewport: Size) -> int:\n return 4\n\n def get_content_height(self, container: Size, viewport: Size, width: int) -> int:\n return 1\n\n async def _on_click(self, event: Click) -> None:\n \"\"\"Toggle the state of the switch.\"\"\"\n 
event.stop()\n self.toggle()\n\n def action_toggle(self) -> None:\n \"\"\"Toggle the state of the switch.\"\"\"\n self.toggle()\n\n def toggle(self) -> Self:\n \"\"\"Toggle the switch value.\n\n As a result of the value changing, a `Switch.Changed` message will\n be posted.\n\n Returns:\n The `Switch` instance.\n \"\"\"\n self.value = not self.value\n return self\n", "path": "src/textual/widgets/_switch.py"}]} | 2,050 | 140 |
gh_patches_debug_4016 | rasdani/github-patches | git_diff | ansible__awx-14626 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Issue on awx.awx.export/import awx cli/collection
### Please confirm the following
- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).
- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.
- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.
- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)
### Bug Summary
Hi team,
I found two bugs related to the awx CLI and collection import/export.
The first issue is with the export module, which does not work if the user is a system_auditor (or does not have certain admin roles on objects such as schedules).
I already found why this bug is present and proposed a PR: #14626 .
The second "bug" is in the import module (I don't know whether this behaviour was a deliberate design decision) and concerns how the import is processed.
Currently the import performs a `POST` if the object does not exist, or a `PUT` if it does. When we `PUT` an object that already exists, the API replaces all of its fields, including the encrypted keys, which are (correctly) removed from the export.
I don't know whether a `PATCH` instead of a `PUT` would be a better approach, but I think the real issue is in the export itself, which replaces the value of each encrypted key, `$encrypted$`, with `''`.
The side effect of restoring `$encrypted$` in the export is that we have to strip it out again before the `POST`, because it is a reserved keyword.
I will also propose a PR to fix the second bug.
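As an illustration of the export-side idea (an editor sketch, not the actual awxkit code — the helper name is hypothetical): keep the `$encrypted$` markers in the export, then drop those fields again just before sending a `POST` or `PUT`.

```python
ENCRYPTED = "$encrypted$"

def strip_encrypted_fields(payload):
    """Recursively drop fields whose value is the $encrypted$ placeholder (hypothetical helper)."""
    if isinstance(payload, dict):
        return {k: strip_encrypted_fields(v) for k, v in payload.items() if v != ENCRYPTED}
    if isinstance(payload, list):
        return [strip_encrypted_fields(item) for item in payload]
    return payload
```

That way an import no longer overwrites existing secrets with empty strings, while still never sending the reserved `$encrypted$` value to the API.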
### AWX version
23.3.1
### Select the relevant components
- [ ] UI
- [ ] UI (tech preview)
- [X] API
- [ ] Docs
- [X] Collection
- [X] CLI
- [ ] Other
### Installation method
kubernetes
### Modifications
no
### Ansible version
_No response_
### Operating system
_No response_
### Web browser
_No response_
### Steps to reproduce
bug 1: execute an export of schedule with a system_auditor
bug 2: import a credential already present in AWX
### Expected results
bug 1: export will go fine
bug 2: credential will be updated with only fields present in export
### Actual results
bug 1: export will fail
bug 2: the credential is replaced wholesale with the exported data. Because encrypted keys are not exported and their `$encrypted$` value is replaced with `''`, the existing secrets end up overwritten with `''`
### Additional information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awxkit/awxkit/api/utils.py`
Content:
```
1 import logging
2 import re
3
4
5 log = logging.getLogger(__name__)
6
7 descRE = re.compile(r'^[*] `(\w+)`: [^(]*\((\w+), ([^)]+)\)')
8
9
10 def freeze(key):
11 if key is None:
12 return None
13 return frozenset((k, freeze(v) if isinstance(v, dict) else v) for k, v in key.items())
14
15
16 def parse_description(desc):
17 options = {}
18 for line in desc[desc.index('POST') :].splitlines():
19 match = descRE.match(line)
20 if not match:
21 continue
22 options[match.group(1)] = {'type': match.group(2), 'required': match.group(3) == 'required'}
23 return options
24
25
26 def remove_encrypted(value):
27 if value == '$encrypted$':
28 return ''
29 if isinstance(value, list):
30 return [remove_encrypted(item) for item in value]
31 if isinstance(value, dict):
32 return {k: remove_encrypted(v) for k, v in value.items()}
33 return value
34
35
36 def get_post_fields(page, cache):
37 options_page = cache.get_options(page)
38 if options_page is None:
39 return None
40
41 if 'POST' not in options_page.r.headers.get('Allow', ''):
42 return None
43
44 if 'POST' in options_page.json['actions']:
45 return options_page.json['actions']['POST']
46 else:
47 log.warning("Insufficient privileges on %s, inferring POST fields from description.", options_page.endpoint)
48 return parse_description(options_page.json['description'])
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awxkit/awxkit/api/utils.py b/awxkit/awxkit/api/utils.py
--- a/awxkit/awxkit/api/utils.py
+++ b/awxkit/awxkit/api/utils.py
@@ -15,7 +15,12 @@
def parse_description(desc):
options = {}
- for line in desc[desc.index('POST') :].splitlines():
+ desc_lines = []
+ if 'POST' in desc:
+ desc_lines = desc[desc.index('POST') :].splitlines()
+ else:
+ desc_lines = desc.splitlines()
+ for line in desc_lines:
match = descRE.match(line)
if not match:
continue
| {"golden_diff": "diff --git a/awxkit/awxkit/api/utils.py b/awxkit/awxkit/api/utils.py\n--- a/awxkit/awxkit/api/utils.py\n+++ b/awxkit/awxkit/api/utils.py\n@@ -15,7 +15,12 @@\n \n def parse_description(desc):\n options = {}\n- for line in desc[desc.index('POST') :].splitlines():\n+ desc_lines = []\n+ if 'POST' in desc:\n+ desc_lines = desc[desc.index('POST') :].splitlines()\n+ else:\n+ desc_lines = desc.splitlines()\n+ for line in desc_lines:\n match = descRE.match(line)\n if not match:\n continue\n", "issue": "Issue on awx.awx.export/import awx cli/collection\n### Please confirm the following\r\n\r\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\r\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\r\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\r\n- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)\r\n\r\n### Bug Summary\r\n\r\nHi team,\r\nI founded tow bugs related awx cli and collection import / export.\r\n\r\nThe first issue is related export module, that not work if user is a system_auditor (or not have certain admin role in object like schedule.\r\nI already found why this bug is present and proposed a PR: #14626 .\r\n\r\nSecond \"bug\" is related import module (I don't know and don't find if someone decide it) and is related how import will be processed.\r\nActually import perform a `POST`, if object not exists, or a `PUT`, if object exists. In case of we `PUT` an object that already exist api will replace it in all fields, including encrypted key, that in export are removed (correctly).\r\n\r\nSo, i don't know if it's better approach with `PATCH` instead a `PUT`, but I think that here the issue is in the export itself, that will replace value of encrypted key `$encrypted$` with `''` .\r\nThe side effect of \"restore\" `$encrypted$ ` on the export is that we remove it for the POST, due to the fact that is a reseved keyword.\r\n \r\n I will propose a PR also to fix the second bugs\r\n\r\n### AWX version\r\n\r\n23.3.1\r\n\r\n### Select the relevant components\r\n\r\n- [ ] UI\r\n- [ ] UI (tech preview)\r\n- [X] API\r\n- [ ] Docs\r\n- [X] Collection\r\n- [X] CLI\r\n- [ ] Other\r\n\r\n### Installation method\r\n\r\nkubernetes\r\n\r\n### Modifications\r\n\r\nno\r\n\r\n### Ansible version\r\n\r\n_No response_\r\n\r\n### Operating system\r\n\r\n_No response_\r\n\r\n### Web browser\r\n\r\n_No response_\r\n\r\n### Steps to reproduce\r\n\r\nbug 1: execute an export of schedule with a system_auditor\r\n\r\nbug 2: import a credential already present in AWX\r\n\r\n### Expected results\r\n\r\nbug 1: export will go fine\r\n\r\nbug 2: credential will be updated with only fields present in export\r\n\r\n### Actual results\r\n\r\nbug 1: export will fail\r\n\r\nbug 2: credential will be replaced with exported data. 
But due to the fact that encrypted key are not exported and replaced the value `$encrypted$` with `''` we replace current secrets with `''`\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "import logging\nimport re\n\n\nlog = logging.getLogger(__name__)\n\ndescRE = re.compile(r'^[*] `(\\w+)`: [^(]*\\((\\w+), ([^)]+)\\)')\n\n\ndef freeze(key):\n if key is None:\n return None\n return frozenset((k, freeze(v) if isinstance(v, dict) else v) for k, v in key.items())\n\n\ndef parse_description(desc):\n options = {}\n for line in desc[desc.index('POST') :].splitlines():\n match = descRE.match(line)\n if not match:\n continue\n options[match.group(1)] = {'type': match.group(2), 'required': match.group(3) == 'required'}\n return options\n\n\ndef remove_encrypted(value):\n if value == '$encrypted$':\n return ''\n if isinstance(value, list):\n return [remove_encrypted(item) for item in value]\n if isinstance(value, dict):\n return {k: remove_encrypted(v) for k, v in value.items()}\n return value\n\n\ndef get_post_fields(page, cache):\n options_page = cache.get_options(page)\n if options_page is None:\n return None\n\n if 'POST' not in options_page.r.headers.get('Allow', ''):\n return None\n\n if 'POST' in options_page.json['actions']:\n return options_page.json['actions']['POST']\n else:\n log.warning(\"Insufficient privileges on %s, inferring POST fields from description.\", options_page.endpoint)\n return parse_description(options_page.json['description'])\n", "path": "awxkit/awxkit/api/utils.py"}], "after_files": [{"content": "import logging\nimport re\n\n\nlog = logging.getLogger(__name__)\n\ndescRE = re.compile(r'^[*] `(\\w+)`: [^(]*\\((\\w+), ([^)]+)\\)')\n\n\ndef freeze(key):\n if key is None:\n return None\n return frozenset((k, freeze(v) if isinstance(v, dict) else v) for k, v in key.items())\n\n\ndef parse_description(desc):\n options = {}\n desc_lines = []\n if 'POST' in desc:\n desc_lines = desc[desc.index('POST') :].splitlines()\n else:\n desc_lines = desc.splitlines()\n for line in desc_lines:\n match = descRE.match(line)\n if not match:\n continue\n options[match.group(1)] = {'type': match.group(2), 'required': match.group(3) == 'required'}\n return options\n\n\ndef remove_encrypted(value):\n if value == '$encrypted$':\n return ''\n if isinstance(value, list):\n return [remove_encrypted(item) for item in value]\n if isinstance(value, dict):\n return {k: remove_encrypted(v) for k, v in value.items()}\n return value\n\n\ndef get_post_fields(page, cache):\n options_page = cache.get_options(page)\n if options_page is None:\n return None\n\n if 'POST' not in options_page.r.headers.get('Allow', ''):\n return None\n\n if 'POST' in options_page.json['actions']:\n return options_page.json['actions']['POST']\n else:\n log.warning(\"Insufficient privileges on %s, inferring POST fields from description.\", options_page.endpoint)\n return parse_description(options_page.json['description'])\n", "path": "awxkit/awxkit/api/utils.py"}]} | 1,308 | 165 |
gh_patches_debug_6302 | rasdani/github-patches | git_diff | ansible__ansible-38225 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invalid yml causes include_vars to hang or freeze without output
<!---
Verify first that your issue/request is not already reported on GitHub.
Also test if the latest release, and devel branch are affected too.
Always add information AFTER of these html comments.
-->
I made some attempt to sift through the 3,511 issues. Apologies for adding yet another to this hopelessly large list. I did *not* set up a development environment and try to reproduce.
##### ISSUE TYPE
<!--- Pick one below and delete the rest -->
- Bug Report
##### COMPONENT NAME
<!---
Insert below this comment the name of the module, plugin, task or feature.
Do not include extra details here, e.g. "vyos_command" not "the network module vyos_command" or the full path
-->
include_vars
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes below -->
```
ansible 2.4.3.0
config file = /Users/dclo/Documents/ansible/ansible.cfg
configured module search path = ['/Users/dclo/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /usr/local/lib/python3.6/site-packages/ansible
executable location = /usr/local/bin/ansible
python version = 3.6.4 (default, Mar 22 2018, 13:54:22) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]
```
##### CONFIGURATION
<!---
If using Ansible 2.4 or above, paste the results of "ansible-config dump --only-changed"
Otherwise, mention any settings you have changed/added/removed in ansible.cfg
(or using the ANSIBLE_* environment variables).
-->
```
DEFAULT_HOST_LIST(/Users/dclo/Documents/ansible/ansible.cfg) = ['/Users/dclo/Documents/ansible/staging']
DEFAULT_VAULT_PASSWORD_FILE(/Users/dclo/Documents/ansible/ansible.cfg) = /Users/dclo/Documents/ansible/.vault_pass
```
##### OS / ENVIRONMENT
<!---
Mention the OS you are running Ansible from, and the OS you are
managing, or say "N/A" for anything that is not platform-specific.
Also mention the specific version of what you are trying to control,
e.g. if this is a network bug the version of firmware on the network device.
-->
macOS High Sierra 10.13.3
##### SUMMARY
<!--- Explain the problem briefly -->
`ansible-playbook test.yml -vvv` outputs the beginning of the `include_vars` task and then hangs.
##### STEPS TO REPRODUCE
<!---
For bugs, show exactly how to reproduce the problem, using a minimal test-case.
For new features, show how the feature would be used.
-->
- Create a vars file with ansible-vault (create/edit) that has malformed yaml.
- Include it in the playbook.
<!--- Paste example playbooks or commands between quotes below -->
```yaml
---
- hosts: all
gather_facts: no
tasks:
- include_vars: "vars/vault.yml"
```
<!--- You can also paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- What did you expect to happen when running the steps above? -->
Ansible outputs syntax error on the yaml file.
##### ACTUAL RESULTS
<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->
Ansible hangs
<!--- Paste verbatim command output between quotes below -->
```
TASK [include_vars] ********************************************************************************
task path: /Users/dclo/Documents/ansible/test.yml:5
^CProcess WorkerProcess-2:
[ERROR]: User interrupted execution
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/ansible/plugins/action/include_vars.py`
Content:
```
1 # (c) 2016, Allen Sanabria <[email protected]>
2 #
3 # This file is part of Ansible
4 #
5 # Ansible is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License as published by
7 # the Free Software Foundation, either version 3 of the License, or
8 # (at your option) any later version.
9 #
10 # Ansible is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License
16 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
17
18 from __future__ import (absolute_import, division, print_function)
19 __metaclass__ = type
20
21 from os import path, walk
22 import re
23
24 from ansible.errors import AnsibleError
25 from ansible.module_utils.six import string_types
26 from ansible.module_utils._text import to_native, to_text
27 from ansible.plugins.action import ActionBase
28
29
30 class ActionModule(ActionBase):
31
32 TRANSFERS_FILES = False
33
34 VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']
35 VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']
36 VALID_FILE_ARGUMENTS = ['file', '_raw_params']
37 VALID_ALL = ['name']
38
39 def _set_dir_defaults(self):
40 if not self.depth:
41 self.depth = 0
42
43 if self.files_matching:
44 self.matcher = re.compile(r'{0}'.format(self.files_matching))
45 else:
46 self.matcher = None
47
48 if not self.ignore_files:
49 self.ignore_files = list()
50
51 if isinstance(self.ignore_files, str):
52 self.ignore_files = self.ignore_files.split()
53
54 elif isinstance(self.ignore_files, dict):
55 return {
56 'failed': True,
57 'message': '{0} must be a list'.format(self.ignore_files)
58 }
59
60 def _set_args(self):
61 """ Set instance variables based on the arguments that were passed """
62
63 self.return_results_as_name = self._task.args.get('name', None)
64 self.source_dir = self._task.args.get('dir', None)
65 self.source_file = self._task.args.get('file', None)
66 if not self.source_dir and not self.source_file:
67 self.source_file = self._task.args.get('_raw_params')
68
69 self.depth = self._task.args.get('depth', None)
70 self.files_matching = self._task.args.get('files_matching', None)
71 self.ignore_files = self._task.args.get('ignore_files', None)
72 self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)
73
74 # convert/validate extensions list
75 if isinstance(self.valid_extensions, string_types):
76 self.valid_extensions = list(self.valid_extensions)
77 if not isinstance(self.valid_extensions, list):
78 raise AnsibleError('Invalid type for "extensions" option, it must be a list')
79
80 def run(self, tmp=None, task_vars=None):
81 """ Load yml files recursively from a directory.
82 """
83 del tmp # tmp no longer has any effect
84
85 if task_vars is None:
86 task_vars = dict()
87
88 self.show_content = True
89 self.included_files = []
90
91 # Validate arguments
92 dirs = 0
93 files = 0
94 for arg in self._task.args:
95 if arg in self.VALID_DIR_ARGUMENTS:
96 dirs += 1
97 elif arg in self.VALID_FILE_ARGUMENTS:
98 files += 1
99 elif arg in self.VALID_ALL:
100 pass
101 else:
102 raise AnsibleError('{0} is not a valid option in debug'.format(arg))
103
104 if dirs and files:
105 raise AnsibleError("Your are mixing file only and dir only arguments, these are incompatible")
106
107 # set internal vars from args
108 self._set_args()
109
110 results = dict()
111 if self.source_dir:
112 self._set_dir_defaults()
113 self._set_root_dir()
114 if not path.exists(self.source_dir):
115 failed = True
116 err_msg = ('{0} directory does not exist'.format(self.source_dir))
117 elif not path.isdir(self.source_dir):
118 failed = True
119 err_msg = ('{0} is not a directory'.format(self.source_dir))
120 else:
121 for root_dir, filenames in self._traverse_dir_depth():
122 failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))
123 if failed:
124 break
125 results.update(updated_results)
126 else:
127 try:
128 self.source_file = self._find_needle('vars', self.source_file)
129 failed, err_msg, updated_results = (
130 self._load_files(self.source_file)
131 )
132 if not failed:
133 results.update(updated_results)
134
135 except AnsibleError as e:
136 failed = True
137 err_msg = to_native(e)
138
139 if self.return_results_as_name:
140 scope = dict()
141 scope[self.return_results_as_name] = results
142 results = scope
143
144 result = super(ActionModule, self).run(task_vars=task_vars)
145
146 if failed:
147 result['failed'] = failed
148 result['message'] = err_msg
149
150 result['ansible_included_var_files'] = self.included_files
151 result['ansible_facts'] = results
152 result['_ansible_no_log'] = not self.show_content
153
154 return result
155
156 def _set_root_dir(self):
157 if self._task._role:
158 if self.source_dir.split('/')[0] == 'vars':
159 path_to_use = (
160 path.join(self._task._role._role_path, self.source_dir)
161 )
162 if path.exists(path_to_use):
163 self.source_dir = path_to_use
164 else:
165 path_to_use = (
166 path.join(
167 self._task._role._role_path, 'vars', self.source_dir
168 )
169 )
170 self.source_dir = path_to_use
171 else:
172 current_dir = (
173 "/".join(self._task._ds._data_source.split('/')[:-1])
174 )
175 self.source_dir = path.join(current_dir, self.source_dir)
176
177 def _traverse_dir_depth(self):
178 """ Recursively iterate over a directory and sort the files in
179 alphabetical order. Do not iterate pass the set depth.
180 The default depth is unlimited.
181 """
182 current_depth = 0
183 sorted_walk = list(walk(self.source_dir))
184 sorted_walk.sort(key=lambda x: x[0])
185 for current_root, current_dir, current_files in sorted_walk:
186 current_depth += 1
187 if current_depth <= self.depth or self.depth == 0:
188 current_files.sort()
189 yield (current_root, current_files)
190 else:
191 break
192
193 def _ignore_file(self, filename):
194 """ Return True if a file matches the list of ignore_files.
195 Args:
196 filename (str): The filename that is being matched against.
197
198 Returns:
199 Boolean
200 """
201 for file_type in self.ignore_files:
202 try:
203 if re.search(r'{0}$'.format(file_type), filename):
204 return True
205 except Exception:
206 err_msg = 'Invalid regular expression: {0}'.format(file_type)
207 raise AnsibleError(err_msg)
208 return False
209
210 def _is_valid_file_ext(self, source_file):
211 """ Verify if source file has a valid extension
212 Args:
213 source_file (str): The full path of source file or source file.
214 Returns:
215 Bool
216 """
217 file_ext = path.splitext(source_file)
218 return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)
219
220 def _load_files(self, filename, validate_extensions=False):
221 """ Loads a file and converts the output into a valid Python dict.
222 Args:
223 filename (str): The source file.
224
225 Returns:
226 Tuple (bool, str, dict)
227 """
228 results = dict()
229 failed = False
230 err_msg = ''
231 if validate_extensions and not self._is_valid_file_ext(filename):
232 failed = True
233 err_msg = ('{0} does not have a valid extension: {1}' .format(filename, ', '.join(self.valid_extensions)))
234 else:
235 b_data, show_content = self._loader._get_file_contents(filename)
236 data = to_text(b_data, errors='surrogate_or_strict')
237
238 self.show_content = show_content
239 data = self._loader.load(data, show_content)
240 if not data:
241 data = dict()
242 if not isinstance(data, dict):
243 failed = True
244 err_msg = ('{0} must be stored as a dictionary/hash' .format(filename))
245 else:
246 self.included_files.append(filename)
247 results.update(data)
248
249 return failed, err_msg, results
250
251 def _load_files_in_dir(self, root_dir, var_files):
252 """ Load the found yml files and update/overwrite the dictionary.
253 Args:
254 root_dir (str): The base directory of the list of files that is being passed.
255 var_files: (list): List of files to iterate over and load into a dictionary.
256
257 Returns:
258 Tuple (bool, str, dict)
259 """
260 results = dict()
261 failed = False
262 err_msg = ''
263 for filename in var_files:
264 stop_iter = False
265 # Never include main.yml from a role, as that is the default included by the role
266 if self._task._role:
267 if filename == 'main.yml':
268 stop_iter = True
269 continue
270
271 filepath = path.join(root_dir, filename)
272 if self.files_matching:
273 if not self.matcher.search(filename):
274 stop_iter = True
275
276 if not stop_iter and not failed:
277 if path.exists(filepath) and not self._ignore_file(filename):
278 failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)
279 if not failed:
280 results.update(loaded_data)
281
282 return failed, err_msg, results
283
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py
--- a/lib/ansible/plugins/action/include_vars.py
+++ b/lib/ansible/plugins/action/include_vars.py
@@ -236,7 +236,7 @@
data = to_text(b_data, errors='surrogate_or_strict')
self.show_content = show_content
- data = self._loader.load(data, show_content)
+ data = self._loader.load(data, file_name=filename, show_content=show_content)
if not data:
data = dict()
if not isinstance(data, dict):
| {"golden_diff": "diff --git a/lib/ansible/plugins/action/include_vars.py b/lib/ansible/plugins/action/include_vars.py\n--- a/lib/ansible/plugins/action/include_vars.py\n+++ b/lib/ansible/plugins/action/include_vars.py\n@@ -236,7 +236,7 @@\n data = to_text(b_data, errors='surrogate_or_strict')\n \n self.show_content = show_content\n- data = self._loader.load(data, show_content)\n+ data = self._loader.load(data, file_name=filename, show_content=show_content)\n if not data:\n data = dict()\n if not isinstance(data, dict):\n", "issue": "Invalid yml causes include_vars to hang or freeze without output\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and devel branch are affected too.\r\nAlways add information AFTER of these html comments.\r\n-->\r\n\r\nI made some attempt to sift through the 3,511 issues. Apology for adding yet another to this hopelessly large list. I did *not* set-up for development and try to reproduce.\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest -->\r\n - Bug Report\r\n \r\n##### COMPONENT NAME\r\n<!---\r\nInsert below this comment the name of the module, plugin, task or feature.\r\nDo not include extra details here, e.g. \"vyos_command\" not \"the network module vyos_command\" or the full path\r\n-->\r\ninclude_vars\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes below -->\r\n```\r\nansible 2.4.3.0\r\n config file = /Users/dclo/Documents/ansible/ansible.cfg\r\n configured module search path = ['/Users/dclo/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/local/lib/python3.6/site-packages/ansible\r\n executable location = /usr/local/bin/ansible\r\n python version = 3.6.4 (default, Mar 22 2018, 13:54:22) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!---\r\nIf using Ansible 2.4 or above, paste the results of \"ansible-config dump --only-changed\"\r\nOtherwise, mention any settings you have changed/added/removed in ansible.cfg\r\n(or using the ANSIBLE_* environment variables).\r\n-->\r\n```\r\nDEFAULT_HOST_LIST(/Users/dclo/Documents/ansible/ansible.cfg) = ['/Users/dclo/Documents/ansible/staging']\r\nDEFAULT_VAULT_PASSWORD_FILE(/Users/dclo/Documents/ansible/ansible.cfg) = /Users/dclo/Documents/ansible/.vault_pass\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!---\r\nMention the OS you are running Ansible from, and the OS you are\r\nmanaging, or say \"N/A\" for anything that is not platform-specific.\r\nAlso mention the specific version of what you are trying to control,\r\ne.g. 
if this is a network bug the version of firmware on the network device.\r\n-->\r\nmacOS High Sierra 10.13.3\r\n\r\n##### SUMMARY\r\n<!--- Explain the problem briefly -->\r\n`ansible-playbook test.yml -vvv` outputs the beginning of the `include_vars` task and then hangs.\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\n- Create a vars file with ansible-vault (create/edit) that has malformed yaml.\r\n- Include it in the playbook.\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n---\r\n- hosts: all\r\n gather_facts: no\r\n tasks:\r\n - include_vars: \"vars/vault.yml\"\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nAnsible outputs syntax error on the yaml file.\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->\r\nAnsible hangs\r\n\r\n<!--- Paste verbatim command output between quotes below -->\r\n```\r\nTASK [include_vars] ********************************************************************************\r\ntask path: /Users/dclo/Documents/ansible/test.yml:5\r\n^CProcess WorkerProcess-2:\r\n [ERROR]: User interrupted execution\r\n```\r\n\n", "before_files": [{"content": "# (c) 2016, Allen Sanabria <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom os import path, walk\nimport re\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.six import string_types\nfrom ansible.module_utils._text import to_native, to_text\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = False\n\n VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']\n VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']\n VALID_FILE_ARGUMENTS = ['file', '_raw_params']\n VALID_ALL = ['name']\n\n def _set_dir_defaults(self):\n if not self.depth:\n self.depth = 0\n\n if self.files_matching:\n self.matcher = re.compile(r'{0}'.format(self.files_matching))\n else:\n self.matcher = None\n\n if not self.ignore_files:\n self.ignore_files = list()\n\n if isinstance(self.ignore_files, str):\n self.ignore_files = self.ignore_files.split()\n\n elif isinstance(self.ignore_files, dict):\n return {\n 'failed': True,\n 'message': '{0} must be a list'.format(self.ignore_files)\n }\n\n def _set_args(self):\n \"\"\" Set instance variables based on the arguments that were passed \"\"\"\n\n self.return_results_as_name = self._task.args.get('name', None)\n self.source_dir = self._task.args.get('dir', None)\n self.source_file = self._task.args.get('file', None)\n if not self.source_dir and not self.source_file:\n self.source_file = self._task.args.get('_raw_params')\n\n self.depth = self._task.args.get('depth', None)\n self.files_matching = self._task.args.get('files_matching', None)\n self.ignore_files = self._task.args.get('ignore_files', None)\n self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)\n\n # convert/validate extensions list\n if isinstance(self.valid_extensions, string_types):\n self.valid_extensions = list(self.valid_extensions)\n if not isinstance(self.valid_extensions, list):\n raise AnsibleError('Invalid type for \"extensions\" option, it must be a list')\n\n def run(self, tmp=None, task_vars=None):\n \"\"\" Load yml files recursively from a directory.\n \"\"\"\n del tmp # tmp no longer has any effect\n\n if task_vars is None:\n task_vars = dict()\n\n self.show_content = True\n self.included_files = []\n\n # Validate arguments\n dirs = 0\n files = 0\n for arg in self._task.args:\n if arg in self.VALID_DIR_ARGUMENTS:\n dirs += 1\n elif arg in self.VALID_FILE_ARGUMENTS:\n files += 1\n elif arg in self.VALID_ALL:\n pass\n else:\n raise AnsibleError('{0} is not a valid option in debug'.format(arg))\n\n if dirs and files:\n raise AnsibleError(\"Your are mixing file only and dir only arguments, these are incompatible\")\n\n # set internal vars from args\n self._set_args()\n\n results = dict()\n if self.source_dir:\n self._set_dir_defaults()\n self._set_root_dir()\n if not path.exists(self.source_dir):\n failed = True\n err_msg = ('{0} directory does not exist'.format(self.source_dir))\n elif not path.isdir(self.source_dir):\n failed = True\n err_msg = ('{0} is not a directory'.format(self.source_dir))\n else:\n for root_dir, filenames in self._traverse_dir_depth():\n failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))\n if failed:\n break\n results.update(updated_results)\n else:\n try:\n self.source_file = self._find_needle('vars', self.source_file)\n failed, err_msg, updated_results = (\n self._load_files(self.source_file)\n )\n if not failed:\n results.update(updated_results)\n\n 
except AnsibleError as e:\n failed = True\n err_msg = to_native(e)\n\n if self.return_results_as_name:\n scope = dict()\n scope[self.return_results_as_name] = results\n results = scope\n\n result = super(ActionModule, self).run(task_vars=task_vars)\n\n if failed:\n result['failed'] = failed\n result['message'] = err_msg\n\n result['ansible_included_var_files'] = self.included_files\n result['ansible_facts'] = results\n result['_ansible_no_log'] = not self.show_content\n\n return result\n\n def _set_root_dir(self):\n if self._task._role:\n if self.source_dir.split('/')[0] == 'vars':\n path_to_use = (\n path.join(self._task._role._role_path, self.source_dir)\n )\n if path.exists(path_to_use):\n self.source_dir = path_to_use\n else:\n path_to_use = (\n path.join(\n self._task._role._role_path, 'vars', self.source_dir\n )\n )\n self.source_dir = path_to_use\n else:\n current_dir = (\n \"/\".join(self._task._ds._data_source.split('/')[:-1])\n )\n self.source_dir = path.join(current_dir, self.source_dir)\n\n def _traverse_dir_depth(self):\n \"\"\" Recursively iterate over a directory and sort the files in\n alphabetical order. Do not iterate pass the set depth.\n The default depth is unlimited.\n \"\"\"\n current_depth = 0\n sorted_walk = list(walk(self.source_dir))\n sorted_walk.sort(key=lambda x: x[0])\n for current_root, current_dir, current_files in sorted_walk:\n current_depth += 1\n if current_depth <= self.depth or self.depth == 0:\n current_files.sort()\n yield (current_root, current_files)\n else:\n break\n\n def _ignore_file(self, filename):\n \"\"\" Return True if a file matches the list of ignore_files.\n Args:\n filename (str): The filename that is being matched against.\n\n Returns:\n Boolean\n \"\"\"\n for file_type in self.ignore_files:\n try:\n if re.search(r'{0}$'.format(file_type), filename):\n return True\n except Exception:\n err_msg = 'Invalid regular expression: {0}'.format(file_type)\n raise AnsibleError(err_msg)\n return False\n\n def _is_valid_file_ext(self, source_file):\n \"\"\" Verify if source file has a valid extension\n Args:\n source_file (str): The full path of source file or source file.\n Returns:\n Bool\n \"\"\"\n file_ext = path.splitext(source_file)\n return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)\n\n def _load_files(self, filename, validate_extensions=False):\n \"\"\" Loads a file and converts the output into a valid Python dict.\n Args:\n filename (str): The source file.\n\n Returns:\n Tuple (bool, str, dict)\n \"\"\"\n results = dict()\n failed = False\n err_msg = ''\n if validate_extensions and not self._is_valid_file_ext(filename):\n failed = True\n err_msg = ('{0} does not have a valid extension: {1}' .format(filename, ', '.join(self.valid_extensions)))\n else:\n b_data, show_content = self._loader._get_file_contents(filename)\n data = to_text(b_data, errors='surrogate_or_strict')\n\n self.show_content = show_content\n data = self._loader.load(data, show_content)\n if not data:\n data = dict()\n if not isinstance(data, dict):\n failed = True\n err_msg = ('{0} must be stored as a dictionary/hash' .format(filename))\n else:\n self.included_files.append(filename)\n results.update(data)\n\n return failed, err_msg, results\n\n def _load_files_in_dir(self, root_dir, var_files):\n \"\"\" Load the found yml files and update/overwrite the dictionary.\n Args:\n root_dir (str): The base directory of the list of files that is being passed.\n var_files: (list): List of files to iterate over and load into a dictionary.\n\n Returns:\n 
Tuple (bool, str, dict)\n \"\"\"\n results = dict()\n failed = False\n err_msg = ''\n for filename in var_files:\n stop_iter = False\n # Never include main.yml from a role, as that is the default included by the role\n if self._task._role:\n if filename == 'main.yml':\n stop_iter = True\n continue\n\n filepath = path.join(root_dir, filename)\n if self.files_matching:\n if not self.matcher.search(filename):\n stop_iter = True\n\n if not stop_iter and not failed:\n if path.exists(filepath) and not self._ignore_file(filename):\n failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)\n if not failed:\n results.update(loaded_data)\n\n return failed, err_msg, results\n", "path": "lib/ansible/plugins/action/include_vars.py"}], "after_files": [{"content": "# (c) 2016, Allen Sanabria <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nfrom os import path, walk\nimport re\n\nfrom ansible.errors import AnsibleError\nfrom ansible.module_utils.six import string_types\nfrom ansible.module_utils._text import to_native, to_text\nfrom ansible.plugins.action import ActionBase\n\n\nclass ActionModule(ActionBase):\n\n TRANSFERS_FILES = False\n\n VALID_FILE_EXTENSIONS = ['yaml', 'yml', 'json']\n VALID_DIR_ARGUMENTS = ['dir', 'depth', 'files_matching', 'ignore_files', 'extensions']\n VALID_FILE_ARGUMENTS = ['file', '_raw_params']\n VALID_ALL = ['name']\n\n def _set_dir_defaults(self):\n if not self.depth:\n self.depth = 0\n\n if self.files_matching:\n self.matcher = re.compile(r'{0}'.format(self.files_matching))\n else:\n self.matcher = None\n\n if not self.ignore_files:\n self.ignore_files = list()\n\n if isinstance(self.ignore_files, str):\n self.ignore_files = self.ignore_files.split()\n\n elif isinstance(self.ignore_files, dict):\n return {\n 'failed': True,\n 'message': '{0} must be a list'.format(self.ignore_files)\n }\n\n def _set_args(self):\n \"\"\" Set instance variables based on the arguments that were passed \"\"\"\n\n self.return_results_as_name = self._task.args.get('name', None)\n self.source_dir = self._task.args.get('dir', None)\n self.source_file = self._task.args.get('file', None)\n if not self.source_dir and not self.source_file:\n self.source_file = self._task.args.get('_raw_params')\n\n self.depth = self._task.args.get('depth', None)\n self.files_matching = self._task.args.get('files_matching', None)\n self.ignore_files = self._task.args.get('ignore_files', None)\n self.valid_extensions = self._task.args.get('extensions', self.VALID_FILE_EXTENSIONS)\n\n # convert/validate extensions list\n if isinstance(self.valid_extensions, string_types):\n self.valid_extensions = list(self.valid_extensions)\n if not isinstance(self.valid_extensions, list):\n raise AnsibleError('Invalid type for \"extensions\" option, it must be a list')\n\n def run(self, 
tmp=None, task_vars=None):\n \"\"\" Load yml files recursively from a directory.\n \"\"\"\n del tmp # tmp no longer has any effect\n\n if task_vars is None:\n task_vars = dict()\n\n self.show_content = True\n self.included_files = []\n\n # Validate arguments\n dirs = 0\n files = 0\n for arg in self._task.args:\n if arg in self.VALID_DIR_ARGUMENTS:\n dirs += 1\n elif arg in self.VALID_FILE_ARGUMENTS:\n files += 1\n elif arg in self.VALID_ALL:\n pass\n else:\n raise AnsibleError('{0} is not a valid option in debug'.format(arg))\n\n if dirs and files:\n raise AnsibleError(\"Your are mixing file only and dir only arguments, these are incompatible\")\n\n # set internal vars from args\n self._set_args()\n\n results = dict()\n if self.source_dir:\n self._set_dir_defaults()\n self._set_root_dir()\n if not path.exists(self.source_dir):\n failed = True\n err_msg = ('{0} directory does not exist'.format(self.source_dir))\n elif not path.isdir(self.source_dir):\n failed = True\n err_msg = ('{0} is not a directory'.format(self.source_dir))\n else:\n for root_dir, filenames in self._traverse_dir_depth():\n failed, err_msg, updated_results = (self._load_files_in_dir(root_dir, filenames))\n if failed:\n break\n results.update(updated_results)\n else:\n try:\n self.source_file = self._find_needle('vars', self.source_file)\n failed, err_msg, updated_results = (\n self._load_files(self.source_file)\n )\n if not failed:\n results.update(updated_results)\n\n except AnsibleError as e:\n failed = True\n err_msg = to_native(e)\n\n if self.return_results_as_name:\n scope = dict()\n scope[self.return_results_as_name] = results\n results = scope\n\n result = super(ActionModule, self).run(task_vars=task_vars)\n\n if failed:\n result['failed'] = failed\n result['message'] = err_msg\n\n result['ansible_included_var_files'] = self.included_files\n result['ansible_facts'] = results\n result['_ansible_no_log'] = not self.show_content\n\n return result\n\n def _set_root_dir(self):\n if self._task._role:\n if self.source_dir.split('/')[0] == 'vars':\n path_to_use = (\n path.join(self._task._role._role_path, self.source_dir)\n )\n if path.exists(path_to_use):\n self.source_dir = path_to_use\n else:\n path_to_use = (\n path.join(\n self._task._role._role_path, 'vars', self.source_dir\n )\n )\n self.source_dir = path_to_use\n else:\n current_dir = (\n \"/\".join(self._task._ds._data_source.split('/')[:-1])\n )\n self.source_dir = path.join(current_dir, self.source_dir)\n\n def _traverse_dir_depth(self):\n \"\"\" Recursively iterate over a directory and sort the files in\n alphabetical order. 
Do not iterate pass the set depth.\n The default depth is unlimited.\n \"\"\"\n current_depth = 0\n sorted_walk = list(walk(self.source_dir))\n sorted_walk.sort(key=lambda x: x[0])\n for current_root, current_dir, current_files in sorted_walk:\n current_depth += 1\n if current_depth <= self.depth or self.depth == 0:\n current_files.sort()\n yield (current_root, current_files)\n else:\n break\n\n def _ignore_file(self, filename):\n \"\"\" Return True if a file matches the list of ignore_files.\n Args:\n filename (str): The filename that is being matched against.\n\n Returns:\n Boolean\n \"\"\"\n for file_type in self.ignore_files:\n try:\n if re.search(r'{0}$'.format(file_type), filename):\n return True\n except Exception:\n err_msg = 'Invalid regular expression: {0}'.format(file_type)\n raise AnsibleError(err_msg)\n return False\n\n def _is_valid_file_ext(self, source_file):\n \"\"\" Verify if source file has a valid extension\n Args:\n source_file (str): The full path of source file or source file.\n Returns:\n Bool\n \"\"\"\n file_ext = path.splitext(source_file)\n return bool(len(file_ext) > 1 and file_ext[-1][1:] in self.valid_extensions)\n\n def _load_files(self, filename, validate_extensions=False):\n \"\"\" Loads a file and converts the output into a valid Python dict.\n Args:\n filename (str): The source file.\n\n Returns:\n Tuple (bool, str, dict)\n \"\"\"\n results = dict()\n failed = False\n err_msg = ''\n if validate_extensions and not self._is_valid_file_ext(filename):\n failed = True\n err_msg = ('{0} does not have a valid extension: {1}' .format(filename, ', '.join(self.valid_extensions)))\n else:\n b_data, show_content = self._loader._get_file_contents(filename)\n data = to_text(b_data, errors='surrogate_or_strict')\n\n self.show_content = show_content\n data = self._loader.load(data, file_name=filename, show_content=show_content)\n if not data:\n data = dict()\n if not isinstance(data, dict):\n failed = True\n err_msg = ('{0} must be stored as a dictionary/hash' .format(filename))\n else:\n self.included_files.append(filename)\n results.update(data)\n\n return failed, err_msg, results\n\n def _load_files_in_dir(self, root_dir, var_files):\n \"\"\" Load the found yml files and update/overwrite the dictionary.\n Args:\n root_dir (str): The base directory of the list of files that is being passed.\n var_files: (list): List of files to iterate over and load into a dictionary.\n\n Returns:\n Tuple (bool, str, dict)\n \"\"\"\n results = dict()\n failed = False\n err_msg = ''\n for filename in var_files:\n stop_iter = False\n # Never include main.yml from a role, as that is the default included by the role\n if self._task._role:\n if filename == 'main.yml':\n stop_iter = True\n continue\n\n filepath = path.join(root_dir, filename)\n if self.files_matching:\n if not self.matcher.search(filename):\n stop_iter = True\n\n if not stop_iter and not failed:\n if path.exists(filepath) and not self._ignore_file(filename):\n failed, err_msg, loaded_data = self._load_files(filepath, validate_extensions=True)\n if not failed:\n results.update(loaded_data)\n\n return failed, err_msg, results\n", "path": "lib/ansible/plugins/action/include_vars.py"}]} | 3,995 | 135 |
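The golden diff above changes a single call: `self._loader.load(data, show_content)` becomes `self._loader.load(data, file_name=filename, show_content=show_content)`. The tiny runnable stand-in below illustrates why the keyword form matters if the loader's signature places `file_name` before `show_content`; that parameter order is an assumption inferred from the fix, not taken from Ansible's actual `DataLoader.load`.

```python
def load(data, file_name='<string>', show_content=True):
    # Stand-in with the parameter order the fix implies (an assumption; this is
    # not the real ansible.parsing.dataloader.DataLoader.load).
    return {'file_name': file_name, 'show_content': show_content}

# Positional call, as in the original line: the boolean lands in file_name.
print(load("yaml text", False))
# -> {'file_name': False, 'show_content': True}

# Keyword call, as in the patched line: each value lands where intended, and the
# real filename travels with the data so parse errors can point at the file.
print(load("yaml text", file_name="vars/vault.yml", show_content=False))
# -> {'file_name': 'vars/vault.yml', 'show_content': False}
```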
gh_patches_debug_59177 | rasdani/github-patches | git_diff | fossasia__open-event-server-4147 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IntegrityError: (psycopg2.IntegrityError) column "field_identifier" contains null values
https://sentry.eventyay.com/eventyay/api/issues/25/
```
IntegrityError: (psycopg2.IntegrityError) column "field_identifier" contains null values
[SQL: 'ALTER TABLE custom_forms ADD COLUMN field_identifier VARCHAR NOT NULL']
(25 additional frame(s) were not displayed)
...
File "sqlalchemy/engine/base.py", line 1189, in _execute_context
context)
File "sqlalchemy/engine/base.py", line 1402, in _handle_dbapi_exception
exc_info
File "sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "sqlalchemy/engine/base.py", line 1182, in _execute_context
context)
File "sqlalchemy/engine/default.py", line 470, in do_execute
cursor.execute(statement, parameters)
IntegrityError: (psycopg2.IntegrityError) column "field_identifier" contains null values
[SQL: 'ALTER TABLE custom_forms ADD COLUMN field_identifier VARCHAR NOT NULL']
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `migrations/versions/aefa134809bf_.py`
Content:
```
1 """empty message
2
3 Revision ID: aefa134809bf
4 Revises: 2b39d8c05788
5 Create Date: 2017-07-21 20:37:50.193436
6
7 """
8
9 from alembic import op
10 import sqlalchemy as sa
11 import sqlalchemy_utils
12
13
14 # revision identifiers, used by Alembic.
15 revision = 'aefa134809bf'
16 down_revision = '2b39d8c05788'
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))
22 op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))
23 op.add_column('custom_forms', sa.Column('is_fixed', sa.Boolean(), nullable=True))
24 op.add_column('custom_forms', sa.Column('is_included', sa.Boolean(), nullable=True))
25 op.add_column('custom_forms', sa.Column('is_required', sa.Boolean(), nullable=True))
26 op.add_column('custom_forms', sa.Column('type', sa.String(), nullable=False))
27 op.create_unique_constraint('custom_form_identifier', 'custom_forms', ['event_id', 'field_identifier', 'form'])
28 op.drop_column('custom_forms', 'speaker_form')
29 op.drop_column('custom_forms', 'session_form')
30 # ### end Alembic commands ###
31
32
33 def downgrade():
34 # ### commands auto generated by Alembic - please adjust! ###
35 op.add_column('custom_forms', sa.Column('session_form', sa.VARCHAR(), autoincrement=False, nullable=False))
36 op.add_column('custom_forms', sa.Column('speaker_form', sa.VARCHAR(), autoincrement=False, nullable=False))
37 op.drop_constraint('custom_form_identifier', 'custom_forms', type_='unique')
38 op.drop_column('custom_forms', 'type')
39 op.drop_column('custom_forms', 'is_required')
40 op.drop_column('custom_forms', 'is_included')
41 op.drop_column('custom_forms', 'is_fixed')
42 op.drop_column('custom_forms', 'form')
43 op.drop_column('custom_forms', 'field_identifier')
44 # ### end Alembic commands ###
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/migrations/versions/aefa134809bf_.py b/migrations/versions/aefa134809bf_.py
--- a/migrations/versions/aefa134809bf_.py
+++ b/migrations/versions/aefa134809bf_.py
@@ -18,6 +18,7 @@
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
+ op.execute("DELETE FROM custom_forms")
op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))
op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))
op.add_column('custom_forms', sa.Column('is_fixed', sa.Boolean(), nullable=True))
| {"golden_diff": "diff --git a/migrations/versions/aefa134809bf_.py b/migrations/versions/aefa134809bf_.py\n--- a/migrations/versions/aefa134809bf_.py\n+++ b/migrations/versions/aefa134809bf_.py\n@@ -18,6 +18,7 @@\n \n def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n+ op.execute(\"DELETE FROM custom_forms\")\n op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('is_fixed', sa.Boolean(), nullable=True))\n", "issue": "IntegrityError: (psycopg2.IntegrityError) column \"field_identifier\" contains null values\nhttps://sentry.eventyay.com/eventyay/api/issues/25/\r\n\r\n```\r\nIntegrityError: (psycopg2.IntegrityError) column \"field_identifier\" contains null values\r\n [SQL: 'ALTER TABLE custom_forms ADD COLUMN field_identifier VARCHAR NOT NULL']\r\n(25 additional frame(s) were not displayed)\r\n...\r\n File \"sqlalchemy/engine/base.py\", line 1189, in _execute_context\r\n context)\r\n File \"sqlalchemy/engine/base.py\", line 1402, in _handle_dbapi_exception\r\n exc_info\r\n File \"sqlalchemy/util/compat.py\", line 203, in raise_from_cause\r\n reraise(type(exception), exception, tb=exc_tb, cause=cause)\r\n File \"sqlalchemy/engine/base.py\", line 1182, in _execute_context\r\n context)\r\n File \"sqlalchemy/engine/default.py\", line 470, in do_execute\r\n cursor.execute(statement, parameters)\r\n\r\nIntegrityError: (psycopg2.IntegrityError) column \"field_identifier\" contains null values\r\n [SQL: 'ALTER TABLE custom_forms ADD COLUMN field_identifier VARCHAR NOT NULL']\r\n```\n", "before_files": [{"content": "\"\"\"empty message\n\nRevision ID: aefa134809bf\nRevises: 2b39d8c05788\nCreate Date: 2017-07-21 20:37:50.193436\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = 'aefa134809bf'\ndown_revision = '2b39d8c05788'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('is_fixed', sa.Boolean(), nullable=True))\n op.add_column('custom_forms', sa.Column('is_included', sa.Boolean(), nullable=True))\n op.add_column('custom_forms', sa.Column('is_required', sa.Boolean(), nullable=True))\n op.add_column('custom_forms', sa.Column('type', sa.String(), nullable=False))\n op.create_unique_constraint('custom_form_identifier', 'custom_forms', ['event_id', 'field_identifier', 'form'])\n op.drop_column('custom_forms', 'speaker_form')\n op.drop_column('custom_forms', 'session_form')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('custom_forms', sa.Column('session_form', sa.VARCHAR(), autoincrement=False, nullable=False))\n op.add_column('custom_forms', sa.Column('speaker_form', sa.VARCHAR(), autoincrement=False, nullable=False))\n op.drop_constraint('custom_form_identifier', 'custom_forms', type_='unique')\n op.drop_column('custom_forms', 'type')\n op.drop_column('custom_forms', 'is_required')\n op.drop_column('custom_forms', 'is_included')\n op.drop_column('custom_forms', 'is_fixed')\n op.drop_column('custom_forms', 'form')\n op.drop_column('custom_forms', 'field_identifier')\n # ### end Alembic commands ###\n", "path": "migrations/versions/aefa134809bf_.py"}], "after_files": [{"content": "\"\"\"empty message\n\nRevision ID: aefa134809bf\nRevises: 2b39d8c05788\nCreate Date: 2017-07-21 20:37:50.193436\n\n\"\"\"\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlalchemy_utils\n\n\n# revision identifiers, used by Alembic.\nrevision = 'aefa134809bf'\ndown_revision = '2b39d8c05788'\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.execute(\"DELETE FROM custom_forms\")\n op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))\n op.add_column('custom_forms', sa.Column('is_fixed', sa.Boolean(), nullable=True))\n op.add_column('custom_forms', sa.Column('is_included', sa.Boolean(), nullable=True))\n op.add_column('custom_forms', sa.Column('is_required', sa.Boolean(), nullable=True))\n op.add_column('custom_forms', sa.Column('type', sa.String(), nullable=False))\n op.create_unique_constraint('custom_form_identifier', 'custom_forms', ['event_id', 'field_identifier', 'form'])\n op.drop_column('custom_forms', 'speaker_form')\n op.drop_column('custom_forms', 'session_form')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('custom_forms', sa.Column('session_form', sa.VARCHAR(), autoincrement=False, nullable=False))\n op.add_column('custom_forms', sa.Column('speaker_form', sa.VARCHAR(), autoincrement=False, nullable=False))\n op.drop_constraint('custom_form_identifier', 'custom_forms', type_='unique')\n op.drop_column('custom_forms', 'type')\n op.drop_column('custom_forms', 'is_required')\n op.drop_column('custom_forms', 'is_included')\n op.drop_column('custom_forms', 'is_fixed')\n op.drop_column('custom_forms', 'form')\n op.drop_column('custom_forms', 'field_identifier')\n # ### end Alembic commands ###\n", "path": "migrations/versions/aefa134809bf_.py"}]} | 1,111 | 172 |
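The patch above is a one-line guard at the top of the Alembic `upgrade()`: pre-existing `custom_forms` rows are deleted before the `NOT NULL` columns are added, so PostgreSQL never has to backfill `field_identifier` with NULLs. A trimmed sketch of the patched migration follows; the trailing calls are elided here but unchanged in the real revision.

```python
from alembic import op
import sqlalchemy as sa


def upgrade():
    # Rows created under the old schema would violate the new NOT NULL
    # constraints below, so the golden diff clears the table first.
    op.execute("DELETE FROM custom_forms")
    op.add_column('custom_forms', sa.Column('field_identifier', sa.String(), nullable=False))
    op.add_column('custom_forms', sa.Column('form', sa.String(), nullable=False))
    # ... the remaining add_column / create_unique_constraint / drop_column
    # calls from revision aefa134809bf are unchanged.
```

An alternative that preserves existing rows would be adding the columns with a `server_default` and tightening the constraint afterwards, but that is not what the accepted patch does.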
gh_patches_debug_18270 | rasdani/github-patches | git_diff | dask__distributed-6904 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Importing from distributed shows pyright error
**What happened**:
When type-checking a program that imports from distributed with pyright, an error is accused:
```python
# foo.py
from distributed import Client
print(Client)
```
```
pyright foo.py
...
/tmp/test-area/foo.py:1:25 - error: "Client" is not exported from module "distributed"
Import from "distributed.client" instead (reportPrivateImportUsage)
1 error, 0 warnings, 0 informations
```
**What you expected to happen**:
I expect the package to be correctly typed, following PEP 484
**Anything else we need to know?**:
PEP 484 states that
> Modules and variables imported into the stub are not considered exported from the stub unless the import uses the `import ... as ... form` or the equivalent `from ... import ... as ... form`
and Pyright follows this guideline, although mypy doesn't.
**Environment**:
- Dask version: 2022.8.0
- Python version: 3.10.5
- Operating System: Arch linux
- Install method (conda, pip, source): pip inside an environment
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `distributed/__init__.py`
Content:
```
1 from __future__ import annotations
2
3 # isort: off
4 from distributed import config # load distributed configuration first
5 from distributed import widgets # load distributed widgets second
6
7 # isort: on
8
9 import atexit
10
11 import dask
12 from dask.config import config # type: ignore
13
14 from distributed._version import get_versions
15 from distributed.actor import Actor, ActorFuture, BaseActorFuture
16 from distributed.client import (
17 Client,
18 CompatibleExecutor,
19 Future,
20 as_completed,
21 default_client,
22 fire_and_forget,
23 futures_of,
24 get_task_metadata,
25 get_task_stream,
26 performance_report,
27 wait,
28 )
29 from distributed.core import Status, connect, rpc
30 from distributed.deploy import Adaptive, LocalCluster, SpecCluster, SSHCluster
31 from distributed.diagnostics.plugin import (
32 Environ,
33 NannyPlugin,
34 PipInstall,
35 SchedulerPlugin,
36 UploadDirectory,
37 UploadFile,
38 WorkerPlugin,
39 )
40 from distributed.diagnostics.progressbar import progress
41 from distributed.event import Event
42 from distributed.lock import Lock
43 from distributed.multi_lock import MultiLock
44 from distributed.nanny import Nanny
45 from distributed.pubsub import Pub, Sub
46 from distributed.queues import Queue
47 from distributed.scheduler import KilledWorker, Scheduler
48 from distributed.security import Security
49 from distributed.semaphore import Semaphore
50 from distributed.threadpoolexecutor import rejoin
51 from distributed.utils import CancelledError, TimeoutError, sync
52 from distributed.variable import Variable
53 from distributed.worker import (
54 Reschedule,
55 Worker,
56 get_client,
57 get_worker,
58 print,
59 secede,
60 warn,
61 )
62 from distributed.worker_client import local_client, worker_client
63
64
65 def __getattr__(name):
66 global __version__, __git_revision__
67
68 if name == "__version__":
69 from importlib.metadata import version
70
71 __version__ = version("distributed")
72 return __version__
73
74 if name == "__git_revision__":
75 from distributed._version import get_versions
76
77 __git_revision__ = get_versions()["full-revisionid"]
78 return __git_revision__
79
80 raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
81
82
83 _python_shutting_down = False
84
85
86 @atexit.register
87 def _():
88 """Set a global when Python shuts down.
89
90 Note
91 ----
92 This function must be registered with atexit *after* any class that invokes
93 ``dstributed.utils.is_python_shutting_down`` has been defined. This way it
94 will be called before the ``__del__`` method of those classes.
95
96 See Also
97 --------
98 distributed.utils.is_python_shutting_down
99 """
100 global _python_shutting_down
101 _python_shutting_down = True
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/distributed/__init__.py b/distributed/__init__.py
--- a/distributed/__init__.py
+++ b/distributed/__init__.py
@@ -99,3 +99,65 @@
"""
global _python_shutting_down
_python_shutting_down = True
+
+
+__all__ = [
+ "Actor",
+ "ActorFuture",
+ "Adaptive",
+ "BaseActorFuture",
+ "CancelledError",
+ "Client",
+ "CompatibleExecutor",
+ "Environ",
+ "Event",
+ "Future",
+ "KilledWorker",
+ "LocalCluster",
+ "Lock",
+ "MultiLock",
+ "Nanny",
+ "NannyPlugin",
+ "PipInstall",
+ "Pub",
+ "Queue",
+ "Reschedule",
+ "SSHCluster",
+ "Scheduler",
+ "SchedulerPlugin",
+ "Security",
+ "Semaphore",
+ "SpecCluster",
+ "Status",
+ "Sub",
+ "TimeoutError",
+ "UploadDirectory",
+ "UploadFile",
+ "Variable",
+ "Worker",
+ "WorkerPlugin",
+ "as_completed",
+ "config",
+ "connect",
+ "dask",
+ "default_client",
+ "fire_and_forget",
+ "futures_of",
+ "get_client",
+ "get_task_metadata",
+ "get_task_stream",
+ "get_versions",
+ "get_worker",
+ "local_client",
+ "performance_report",
+ "print",
+ "progress",
+ "rejoin",
+ "rpc",
+ "secede",
+ "sync",
+ "wait",
+ "warn",
+ "widgets",
+ "worker_client",
+]
| {"golden_diff": "diff --git a/distributed/__init__.py b/distributed/__init__.py\n--- a/distributed/__init__.py\n+++ b/distributed/__init__.py\n@@ -99,3 +99,65 @@\n \"\"\"\n global _python_shutting_down\n _python_shutting_down = True\n+\n+\n+__all__ = [\n+ \"Actor\",\n+ \"ActorFuture\",\n+ \"Adaptive\",\n+ \"BaseActorFuture\",\n+ \"CancelledError\",\n+ \"Client\",\n+ \"CompatibleExecutor\",\n+ \"Environ\",\n+ \"Event\",\n+ \"Future\",\n+ \"KilledWorker\",\n+ \"LocalCluster\",\n+ \"Lock\",\n+ \"MultiLock\",\n+ \"Nanny\",\n+ \"NannyPlugin\",\n+ \"PipInstall\",\n+ \"Pub\",\n+ \"Queue\",\n+ \"Reschedule\",\n+ \"SSHCluster\",\n+ \"Scheduler\",\n+ \"SchedulerPlugin\",\n+ \"Security\",\n+ \"Semaphore\",\n+ \"SpecCluster\",\n+ \"Status\",\n+ \"Sub\",\n+ \"TimeoutError\",\n+ \"UploadDirectory\",\n+ \"UploadFile\",\n+ \"Variable\",\n+ \"Worker\",\n+ \"WorkerPlugin\",\n+ \"as_completed\",\n+ \"config\",\n+ \"connect\",\n+ \"dask\",\n+ \"default_client\",\n+ \"fire_and_forget\",\n+ \"futures_of\",\n+ \"get_client\",\n+ \"get_task_metadata\",\n+ \"get_task_stream\",\n+ \"get_versions\",\n+ \"get_worker\",\n+ \"local_client\",\n+ \"performance_report\",\n+ \"print\",\n+ \"progress\",\n+ \"rejoin\",\n+ \"rpc\",\n+ \"secede\",\n+ \"sync\",\n+ \"wait\",\n+ \"warn\",\n+ \"widgets\",\n+ \"worker_client\",\n+]\n", "issue": "Importing from distributed shows pyright error\n**What happened**:\r\nWhen type-checking a program that imports from distributed with pyright, an error is accused:\r\n\r\n```python\r\n# foo.py\r\nfrom distributed import Client\r\nprint(Client)\r\n```\r\n\r\n```\r\npyright foo.py\r\n...\r\n /tmp/test-area/foo.py:1:25 - error: \"Client\" is not exported from module \"distributed\"\r\n \u00a0\u00a0Import from \"distributed.client\" instead (reportPrivateImportUsage)\r\n1 error, 0 warnings, 0 informations\r\n```\r\n\r\n**What you expected to happen**:\r\nI expect the package to be correctly typed, following PEP 484\r\n\r\n\r\n**Anything else we need to know?**:\r\n\r\nPEP 484 states that\r\n\r\n> Modules and variables imported into the stub are not considered exported from the stub unless the import uses the `import ... as ... form` or the equivalent `from ... import ... as ... 
form`\r\n\r\nand Pyright follows this guideline, although mypy doesn't.\r\n\r\n**Environment**:\r\n\r\n- Dask version: 2022.8.0\r\n- Python version: 3.10.5\r\n- Operating System: Arch linux\r\n- Install method (conda, pip, source): pip inside an environment\n", "before_files": [{"content": "from __future__ import annotations\n\n# isort: off\nfrom distributed import config # load distributed configuration first\nfrom distributed import widgets # load distributed widgets second\n\n# isort: on\n\nimport atexit\n\nimport dask\nfrom dask.config import config # type: ignore\n\nfrom distributed._version import get_versions\nfrom distributed.actor import Actor, ActorFuture, BaseActorFuture\nfrom distributed.client import (\n Client,\n CompatibleExecutor,\n Future,\n as_completed,\n default_client,\n fire_and_forget,\n futures_of,\n get_task_metadata,\n get_task_stream,\n performance_report,\n wait,\n)\nfrom distributed.core import Status, connect, rpc\nfrom distributed.deploy import Adaptive, LocalCluster, SpecCluster, SSHCluster\nfrom distributed.diagnostics.plugin import (\n Environ,\n NannyPlugin,\n PipInstall,\n SchedulerPlugin,\n UploadDirectory,\n UploadFile,\n WorkerPlugin,\n)\nfrom distributed.diagnostics.progressbar import progress\nfrom distributed.event import Event\nfrom distributed.lock import Lock\nfrom distributed.multi_lock import MultiLock\nfrom distributed.nanny import Nanny\nfrom distributed.pubsub import Pub, Sub\nfrom distributed.queues import Queue\nfrom distributed.scheduler import KilledWorker, Scheduler\nfrom distributed.security import Security\nfrom distributed.semaphore import Semaphore\nfrom distributed.threadpoolexecutor import rejoin\nfrom distributed.utils import CancelledError, TimeoutError, sync\nfrom distributed.variable import Variable\nfrom distributed.worker import (\n Reschedule,\n Worker,\n get_client,\n get_worker,\n print,\n secede,\n warn,\n)\nfrom distributed.worker_client import local_client, worker_client\n\n\ndef __getattr__(name):\n global __version__, __git_revision__\n\n if name == \"__version__\":\n from importlib.metadata import version\n\n __version__ = version(\"distributed\")\n return __version__\n\n if name == \"__git_revision__\":\n from distributed._version import get_versions\n\n __git_revision__ = get_versions()[\"full-revisionid\"]\n return __git_revision__\n\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n\n\n_python_shutting_down = False\n\n\[email protected]\ndef _():\n \"\"\"Set a global when Python shuts down.\n\n Note\n ----\n This function must be registered with atexit *after* any class that invokes\n ``dstributed.utils.is_python_shutting_down`` has been defined. 
This way it\n will be called before the ``__del__`` method of those classes.\n\n See Also\n --------\n distributed.utils.is_python_shutting_down\n \"\"\"\n global _python_shutting_down\n _python_shutting_down = True\n", "path": "distributed/__init__.py"}], "after_files": [{"content": "from __future__ import annotations\n\n# isort: off\nfrom distributed import config # load distributed configuration first\nfrom distributed import widgets # load distributed widgets second\n\n# isort: on\n\nimport atexit\n\nimport dask\nfrom dask.config import config # type: ignore\n\nfrom distributed._version import get_versions\nfrom distributed.actor import Actor, ActorFuture, BaseActorFuture\nfrom distributed.client import (\n Client,\n CompatibleExecutor,\n Future,\n as_completed,\n default_client,\n fire_and_forget,\n futures_of,\n get_task_metadata,\n get_task_stream,\n performance_report,\n wait,\n)\nfrom distributed.core import Status, connect, rpc\nfrom distributed.deploy import Adaptive, LocalCluster, SpecCluster, SSHCluster\nfrom distributed.diagnostics.plugin import (\n Environ,\n NannyPlugin,\n PipInstall,\n SchedulerPlugin,\n UploadDirectory,\n UploadFile,\n WorkerPlugin,\n)\nfrom distributed.diagnostics.progressbar import progress\nfrom distributed.event import Event\nfrom distributed.lock import Lock\nfrom distributed.multi_lock import MultiLock\nfrom distributed.nanny import Nanny\nfrom distributed.pubsub import Pub, Sub\nfrom distributed.queues import Queue\nfrom distributed.scheduler import KilledWorker, Scheduler\nfrom distributed.security import Security\nfrom distributed.semaphore import Semaphore\nfrom distributed.threadpoolexecutor import rejoin\nfrom distributed.utils import CancelledError, TimeoutError, sync\nfrom distributed.variable import Variable\nfrom distributed.worker import (\n Reschedule,\n Worker,\n get_client,\n get_worker,\n print,\n secede,\n warn,\n)\nfrom distributed.worker_client import local_client, worker_client\n\n\ndef __getattr__(name):\n global __version__, __git_revision__\n\n if name == \"__version__\":\n from importlib.metadata import version\n\n __version__ = version(\"distributed\")\n return __version__\n\n if name == \"__git_revision__\":\n from distributed._version import get_versions\n\n __git_revision__ = get_versions()[\"full-revisionid\"]\n return __git_revision__\n\n raise AttributeError(f\"module {__name__!r} has no attribute {name!r}\")\n\n\n_python_shutting_down = False\n\n\[email protected]\ndef _():\n \"\"\"Set a global when Python shuts down.\n\n Note\n ----\n This function must be registered with atexit *after* any class that invokes\n ``dstributed.utils.is_python_shutting_down`` has been defined. 
This way it\n will be called before the ``__del__`` method of those classes.\n\n See Also\n --------\n distributed.utils.is_python_shutting_down\n \"\"\"\n global _python_shutting_down\n _python_shutting_down = True\n\n\n__all__ = [\n \"Actor\",\n \"ActorFuture\",\n \"Adaptive\",\n \"BaseActorFuture\",\n \"CancelledError\",\n \"Client\",\n \"CompatibleExecutor\",\n \"Environ\",\n \"Event\",\n \"Future\",\n \"KilledWorker\",\n \"LocalCluster\",\n \"Lock\",\n \"MultiLock\",\n \"Nanny\",\n \"NannyPlugin\",\n \"PipInstall\",\n \"Pub\",\n \"Queue\",\n \"Reschedule\",\n \"SSHCluster\",\n \"Scheduler\",\n \"SchedulerPlugin\",\n \"Security\",\n \"Semaphore\",\n \"SpecCluster\",\n \"Status\",\n \"Sub\",\n \"TimeoutError\",\n \"UploadDirectory\",\n \"UploadFile\",\n \"Variable\",\n \"Worker\",\n \"WorkerPlugin\",\n \"as_completed\",\n \"config\",\n \"connect\",\n \"dask\",\n \"default_client\",\n \"fire_and_forget\",\n \"futures_of\",\n \"get_client\",\n \"get_task_metadata\",\n \"get_task_stream\",\n \"get_versions\",\n \"get_worker\",\n \"local_client\",\n \"performance_report\",\n \"print\",\n \"progress\",\n \"rejoin\",\n \"rpc\",\n \"secede\",\n \"sync\",\n \"wait\",\n \"warn\",\n \"widgets\",\n \"worker_client\",\n]\n", "path": "distributed/__init__.py"}]} | 1,301 | 412 |
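The fix above resolves the pyright `reportPrivateImportUsage` error by declaring an explicit `__all__` in `distributed/__init__.py`, which PEP 484-aware checkers treat as the module's re-export list. The abridged sketch below shows the shape of the change with only a handful of names; the real diff enumerates every public symbol, and the `import ... as ...` re-export style mentioned in the issue would be an equivalent alternative.

```python
# distributed/__init__.py (abridged sketch of the patched module)
from distributed.client import Client, Future, wait
from distributed.deploy import LocalCluster

__all__ = [
    "Client",
    "Future",
    "LocalCluster",
    "wait",
    # ... the actual patch lists every name re-exported by the package,
    # e.g. Worker, Scheduler, Nanny, progress, as_completed, and so on.
]
```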
gh_patches_debug_7595 | rasdani/github-patches | git_diff | pypa__setuptools-1530 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Officially support Python 3.7
- [x] Test against 3.7 (#1514)
- [ ] Add Python 3.7 classifier
@jaraco @pganssle @benoit-pierre: Didn't see another issue to track this. Not sure what else is necessary here to consider this "supported", feel free to edit as necessary.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 """
3 Distutils setup file, used to install or test 'setuptools'
4 """
5
6 import io
7 import os
8 import sys
9 import textwrap
10
11 import setuptools
12
13 here = os.path.dirname(__file__)
14
15
16 def require_metadata():
17 "Prevent improper installs without necessary metadata. See #659"
18 egg_info_dir = os.path.join(here, 'setuptools.egg-info')
19 if not os.path.exists(egg_info_dir):
20 msg = (
21 "Cannot build setuptools without metadata. "
22 "Run `bootstrap.py`."
23 )
24 raise RuntimeError(msg)
25
26
27 def read_commands():
28 command_ns = {}
29 cmd_module_path = 'setuptools/command/__init__.py'
30 init_path = os.path.join(here, cmd_module_path)
31 with open(init_path) as init_file:
32 exec(init_file.read(), command_ns)
33 return command_ns['__all__']
34
35
36 def _gen_console_scripts():
37 yield "easy_install = setuptools.command.easy_install:main"
38
39 # Gentoo distributions manage the python-version-specific scripts
40 # themselves, so those platforms define an environment variable to
41 # suppress the creation of the version-specific scripts.
42 var_names = (
43 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
44 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',
45 )
46 if any(os.environ.get(var) not in (None, "", "0") for var in var_names):
47 return
48 tmpl = "easy_install-{shortver} = setuptools.command.easy_install:main"
49 yield tmpl.format(shortver=sys.version[:3])
50
51
52 readme_path = os.path.join(here, 'README.rst')
53 with io.open(readme_path, encoding='utf-8') as readme_file:
54 long_description = readme_file.read()
55
56 package_data = dict(
57 setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],
58 )
59
60 force_windows_specific_files = (
61 os.environ.get("SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES", "1").lower()
62 not in ("", "0", "false", "no")
63 )
64
65 include_windows_files = (
66 sys.platform == 'win32' or
67 os.name == 'java' and os._name == 'nt' or
68 force_windows_specific_files
69 )
70
71 if include_windows_files:
72 package_data.setdefault('setuptools', []).extend(['*.exe'])
73 package_data.setdefault('setuptools.command', []).extend(['*.xml'])
74
75 needs_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)
76 wheel = ['wheel'] if needs_wheel else []
77
78
79 def pypi_link(pkg_filename):
80 """
81 Given the filename, including md5 fragment, construct the
82 dependency link for PyPI.
83 """
84 root = 'https://files.pythonhosted.org/packages/source'
85 name, sep, rest = pkg_filename.partition('-')
86 parts = root, name[0], name, pkg_filename
87 return '/'.join(parts)
88
89
90 setup_params = dict(
91 name="setuptools",
92 version="40.5.0",
93 description=(
94 "Easily download, build, install, upgrade, and uninstall "
95 "Python packages"
96 ),
97 author="Python Packaging Authority",
98 author_email="[email protected]",
99 long_description=long_description,
100 long_description_content_type='text/x-rst; charset=UTF-8',
101 keywords="CPAN PyPI distutils eggs package management",
102 url="https://github.com/pypa/setuptools",
103 project_urls={
104 "Documentation": "https://setuptools.readthedocs.io/",
105 },
106 src_root=None,
107 packages=setuptools.find_packages(exclude=['*.tests']),
108 package_data=package_data,
109 py_modules=['easy_install'],
110 zip_safe=True,
111 entry_points={
112 "distutils.commands": [
113 "%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s" % locals()
114 for cmd in read_commands()
115 ],
116 "distutils.setup_keywords": [
117 "eager_resources = setuptools.dist:assert_string_list",
118 "namespace_packages = setuptools.dist:check_nsp",
119 "extras_require = setuptools.dist:check_extras",
120 "install_requires = setuptools.dist:check_requirements",
121 "tests_require = setuptools.dist:check_requirements",
122 "setup_requires = setuptools.dist:check_requirements",
123 "python_requires = setuptools.dist:check_specifier",
124 "entry_points = setuptools.dist:check_entry_points",
125 "test_suite = setuptools.dist:check_test_suite",
126 "zip_safe = setuptools.dist:assert_bool",
127 "package_data = setuptools.dist:check_package_data",
128 "exclude_package_data = setuptools.dist:check_package_data",
129 "include_package_data = setuptools.dist:assert_bool",
130 "packages = setuptools.dist:check_packages",
131 "dependency_links = setuptools.dist:assert_string_list",
132 "test_loader = setuptools.dist:check_importable",
133 "test_runner = setuptools.dist:check_importable",
134 "use_2to3 = setuptools.dist:assert_bool",
135 "convert_2to3_doctests = setuptools.dist:assert_string_list",
136 "use_2to3_fixers = setuptools.dist:assert_string_list",
137 "use_2to3_exclude_fixers = setuptools.dist:assert_string_list",
138 ],
139 "egg_info.writers": [
140 "PKG-INFO = setuptools.command.egg_info:write_pkg_info",
141 "requires.txt = setuptools.command.egg_info:write_requirements",
142 "entry_points.txt = setuptools.command.egg_info:write_entries",
143 "eager_resources.txt = setuptools.command.egg_info:overwrite_arg",
144 (
145 "namespace_packages.txt = "
146 "setuptools.command.egg_info:overwrite_arg"
147 ),
148 "top_level.txt = setuptools.command.egg_info:write_toplevel_names",
149 "depends.txt = setuptools.command.egg_info:warn_depends_obsolete",
150 "dependency_links.txt = setuptools.command.egg_info:overwrite_arg",
151 ],
152 "console_scripts": list(_gen_console_scripts()),
153 "setuptools.installation":
154 ['eggsecutable = setuptools.command.easy_install:bootstrap'],
155 },
156 classifiers=textwrap.dedent("""
157 Development Status :: 5 - Production/Stable
158 Intended Audience :: Developers
159 License :: OSI Approved :: MIT License
160 Operating System :: OS Independent
161 Programming Language :: Python :: 2
162 Programming Language :: Python :: 2.7
163 Programming Language :: Python :: 3
164 Programming Language :: Python :: 3.4
165 Programming Language :: Python :: 3.5
166 Programming Language :: Python :: 3.6
167 Topic :: Software Development :: Libraries :: Python Modules
168 Topic :: System :: Archiving :: Packaging
169 Topic :: System :: Systems Administration
170 Topic :: Utilities
171 """).strip().splitlines(),
172 python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
173 extras_require={
174 "ssl:sys_platform=='win32'": "wincertstore==0.2",
175 "certs": "certifi==2016.9.26",
176 },
177 dependency_links=[
178 pypi_link(
179 'certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d',
180 ),
181 pypi_link(
182 'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',
183 ),
184 ],
185 scripts=[],
186 setup_requires=[
187 ] + wheel,
188 )
189
190 if __name__ == '__main__':
191 # allow setup.py to run from another directory
192 here and os.chdir(here)
193 require_metadata()
194 dist = setuptools.setup(**setup_params)
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -164,6 +164,7 @@
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
+ Programming Language :: Python :: 3.7
Topic :: Software Development :: Libraries :: Python Modules
Topic :: System :: Archiving :: Packaging
Topic :: System :: Systems Administration
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -164,6 +164,7 @@\n Programming Language :: Python :: 3.4\n Programming Language :: Python :: 3.5\n Programming Language :: Python :: 3.6\n+ Programming Language :: Python :: 3.7\n Topic :: Software Development :: Libraries :: Python Modules\n Topic :: System :: Archiving :: Packaging\n Topic :: System :: Systems Administration\n", "issue": "Officially support Python 3.7\n- [x] Test against 3.7 (#1514)\r\n- [ ] Add Python 3.7 classifier\r\n\r\n@jaraco @pganssle @benoit-pierre: Didn't see another issue to track this. Not sure what else is necessary here to consider this \"supported\", feel free to edit as necessary.\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nDistutils setup file, used to install or test 'setuptools'\n\"\"\"\n\nimport io\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\n\nhere = os.path.dirname(__file__)\n\n\ndef require_metadata():\n \"Prevent improper installs without necessary metadata. See #659\"\n egg_info_dir = os.path.join(here, 'setuptools.egg-info')\n if not os.path.exists(egg_info_dir):\n msg = (\n \"Cannot build setuptools without metadata. \"\n \"Run `bootstrap.py`.\"\n )\n raise RuntimeError(msg)\n\n\ndef read_commands():\n command_ns = {}\n cmd_module_path = 'setuptools/command/__init__.py'\n init_path = os.path.join(here, cmd_module_path)\n with open(init_path) as init_file:\n exec(init_file.read(), command_ns)\n return command_ns['__all__']\n\n\ndef _gen_console_scripts():\n yield \"easy_install = setuptools.command.easy_install:main\"\n\n # Gentoo distributions manage the python-version-specific scripts\n # themselves, so those platforms define an environment variable to\n # suppress the creation of the version-specific scripts.\n var_names = (\n 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n )\n if any(os.environ.get(var) not in (None, \"\", \"0\") for var in var_names):\n return\n tmpl = \"easy_install-{shortver} = setuptools.command.easy_install:main\"\n yield tmpl.format(shortver=sys.version[:3])\n\n\nreadme_path = os.path.join(here, 'README.rst')\nwith io.open(readme_path, encoding='utf-8') as readme_file:\n long_description = readme_file.read()\n\npackage_data = dict(\n setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],\n)\n\nforce_windows_specific_files = (\n os.environ.get(\"SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES\", \"1\").lower()\n not in (\"\", \"0\", \"false\", \"no\")\n)\n\ninclude_windows_files = (\n sys.platform == 'win32' or\n os.name == 'java' and os._name == 'nt' or\n force_windows_specific_files\n)\n\nif include_windows_files:\n package_data.setdefault('setuptools', []).extend(['*.exe'])\n package_data.setdefault('setuptools.command', []).extend(['*.xml'])\n\nneeds_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef pypi_link(pkg_filename):\n \"\"\"\n Given the filename, including md5 fragment, construct the\n dependency link for PyPI.\n \"\"\"\n root = 'https://files.pythonhosted.org/packages/source'\n name, sep, rest = pkg_filename.partition('-')\n parts = root, name[0], name, pkg_filename\n return '/'.join(parts)\n\n\nsetup_params = dict(\n name=\"setuptools\",\n version=\"40.5.0\",\n description=(\n \"Easily download, build, install, upgrade, and uninstall \"\n \"Python packages\"\n ),\n author=\"Python Packaging Authority\",\n author_email=\"[email protected]\",\n 
long_description=long_description,\n long_description_content_type='text/x-rst; charset=UTF-8',\n keywords=\"CPAN PyPI distutils eggs package management\",\n url=\"https://github.com/pypa/setuptools\",\n project_urls={\n \"Documentation\": \"https://setuptools.readthedocs.io/\",\n },\n src_root=None,\n packages=setuptools.find_packages(exclude=['*.tests']),\n package_data=package_data,\n py_modules=['easy_install'],\n zip_safe=True,\n entry_points={\n \"distutils.commands\": [\n \"%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s\" % locals()\n for cmd in read_commands()\n ],\n \"distutils.setup_keywords\": [\n \"eager_resources = setuptools.dist:assert_string_list\",\n \"namespace_packages = setuptools.dist:check_nsp\",\n \"extras_require = setuptools.dist:check_extras\",\n \"install_requires = setuptools.dist:check_requirements\",\n \"tests_require = setuptools.dist:check_requirements\",\n \"setup_requires = setuptools.dist:check_requirements\",\n \"python_requires = setuptools.dist:check_specifier\",\n \"entry_points = setuptools.dist:check_entry_points\",\n \"test_suite = setuptools.dist:check_test_suite\",\n \"zip_safe = setuptools.dist:assert_bool\",\n \"package_data = setuptools.dist:check_package_data\",\n \"exclude_package_data = setuptools.dist:check_package_data\",\n \"include_package_data = setuptools.dist:assert_bool\",\n \"packages = setuptools.dist:check_packages\",\n \"dependency_links = setuptools.dist:assert_string_list\",\n \"test_loader = setuptools.dist:check_importable\",\n \"test_runner = setuptools.dist:check_importable\",\n \"use_2to3 = setuptools.dist:assert_bool\",\n \"convert_2to3_doctests = setuptools.dist:assert_string_list\",\n \"use_2to3_fixers = setuptools.dist:assert_string_list\",\n \"use_2to3_exclude_fixers = setuptools.dist:assert_string_list\",\n ],\n \"egg_info.writers\": [\n \"PKG-INFO = setuptools.command.egg_info:write_pkg_info\",\n \"requires.txt = setuptools.command.egg_info:write_requirements\",\n \"entry_points.txt = setuptools.command.egg_info:write_entries\",\n \"eager_resources.txt = setuptools.command.egg_info:overwrite_arg\",\n (\n \"namespace_packages.txt = \"\n \"setuptools.command.egg_info:overwrite_arg\"\n ),\n \"top_level.txt = setuptools.command.egg_info:write_toplevel_names\",\n \"depends.txt = setuptools.command.egg_info:warn_depends_obsolete\",\n \"dependency_links.txt = setuptools.command.egg_info:overwrite_arg\",\n ],\n \"console_scripts\": list(_gen_console_scripts()),\n \"setuptools.installation\":\n ['eggsecutable = setuptools.command.easy_install:bootstrap'],\n },\n classifiers=textwrap.dedent(\"\"\"\n Development Status :: 5 - Production/Stable\n Intended Audience :: Developers\n License :: OSI Approved :: MIT License\n Operating System :: OS Independent\n Programming Language :: Python :: 2\n Programming Language :: Python :: 2.7\n Programming Language :: Python :: 3\n Programming Language :: Python :: 3.4\n Programming Language :: Python :: 3.5\n Programming Language :: Python :: 3.6\n Topic :: Software Development :: Libraries :: Python Modules\n Topic :: System :: Archiving :: Packaging\n Topic :: System :: Systems Administration\n Topic :: Utilities\n \"\"\").strip().splitlines(),\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n extras_require={\n \"ssl:sys_platform=='win32'\": \"wincertstore==0.2\",\n \"certs\": \"certifi==2016.9.26\",\n },\n dependency_links=[\n pypi_link(\n 'certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d',\n ),\n pypi_link(\n 
'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',\n ),\n ],\n scripts=[],\n setup_requires=[\n ] + wheel,\n)\n\nif __name__ == '__main__':\n # allow setup.py to run from another directory\n here and os.chdir(here)\n require_metadata()\n dist = setuptools.setup(**setup_params)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nDistutils setup file, used to install or test 'setuptools'\n\"\"\"\n\nimport io\nimport os\nimport sys\nimport textwrap\n\nimport setuptools\n\nhere = os.path.dirname(__file__)\n\n\ndef require_metadata():\n \"Prevent improper installs without necessary metadata. See #659\"\n egg_info_dir = os.path.join(here, 'setuptools.egg-info')\n if not os.path.exists(egg_info_dir):\n msg = (\n \"Cannot build setuptools without metadata. \"\n \"Run `bootstrap.py`.\"\n )\n raise RuntimeError(msg)\n\n\ndef read_commands():\n command_ns = {}\n cmd_module_path = 'setuptools/command/__init__.py'\n init_path = os.path.join(here, cmd_module_path)\n with open(init_path) as init_file:\n exec(init_file.read(), command_ns)\n return command_ns['__all__']\n\n\ndef _gen_console_scripts():\n yield \"easy_install = setuptools.command.easy_install:main\"\n\n # Gentoo distributions manage the python-version-specific scripts\n # themselves, so those platforms define an environment variable to\n # suppress the creation of the version-specific scripts.\n var_names = (\n 'SETUPTOOLS_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n 'DISTRIBUTE_DISABLE_VERSIONED_EASY_INSTALL_SCRIPT',\n )\n if any(os.environ.get(var) not in (None, \"\", \"0\") for var in var_names):\n return\n tmpl = \"easy_install-{shortver} = setuptools.command.easy_install:main\"\n yield tmpl.format(shortver=sys.version[:3])\n\n\nreadme_path = os.path.join(here, 'README.rst')\nwith io.open(readme_path, encoding='utf-8') as readme_file:\n long_description = readme_file.read()\n\npackage_data = dict(\n setuptools=['script (dev).tmpl', 'script.tmpl', 'site-patch.py'],\n)\n\nforce_windows_specific_files = (\n os.environ.get(\"SETUPTOOLS_INSTALL_WINDOWS_SPECIFIC_FILES\", \"1\").lower()\n not in (\"\", \"0\", \"false\", \"no\")\n)\n\ninclude_windows_files = (\n sys.platform == 'win32' or\n os.name == 'java' and os._name == 'nt' or\n force_windows_specific_files\n)\n\nif include_windows_files:\n package_data.setdefault('setuptools', []).extend(['*.exe'])\n package_data.setdefault('setuptools.command', []).extend(['*.xml'])\n\nneeds_wheel = set(['release', 'bdist_wheel']).intersection(sys.argv)\nwheel = ['wheel'] if needs_wheel else []\n\n\ndef pypi_link(pkg_filename):\n \"\"\"\n Given the filename, including md5 fragment, construct the\n dependency link for PyPI.\n \"\"\"\n root = 'https://files.pythonhosted.org/packages/source'\n name, sep, rest = pkg_filename.partition('-')\n parts = root, name[0], name, pkg_filename\n return '/'.join(parts)\n\n\nsetup_params = dict(\n name=\"setuptools\",\n version=\"40.5.0\",\n description=(\n \"Easily download, build, install, upgrade, and uninstall \"\n \"Python packages\"\n ),\n author=\"Python Packaging Authority\",\n author_email=\"[email protected]\",\n long_description=long_description,\n long_description_content_type='text/x-rst; charset=UTF-8',\n keywords=\"CPAN PyPI distutils eggs package management\",\n url=\"https://github.com/pypa/setuptools\",\n project_urls={\n \"Documentation\": \"https://setuptools.readthedocs.io/\",\n },\n src_root=None,\n packages=setuptools.find_packages(exclude=['*.tests']),\n package_data=package_data,\n 
py_modules=['easy_install'],\n zip_safe=True,\n entry_points={\n \"distutils.commands\": [\n \"%(cmd)s = setuptools.command.%(cmd)s:%(cmd)s\" % locals()\n for cmd in read_commands()\n ],\n \"distutils.setup_keywords\": [\n \"eager_resources = setuptools.dist:assert_string_list\",\n \"namespace_packages = setuptools.dist:check_nsp\",\n \"extras_require = setuptools.dist:check_extras\",\n \"install_requires = setuptools.dist:check_requirements\",\n \"tests_require = setuptools.dist:check_requirements\",\n \"setup_requires = setuptools.dist:check_requirements\",\n \"python_requires = setuptools.dist:check_specifier\",\n \"entry_points = setuptools.dist:check_entry_points\",\n \"test_suite = setuptools.dist:check_test_suite\",\n \"zip_safe = setuptools.dist:assert_bool\",\n \"package_data = setuptools.dist:check_package_data\",\n \"exclude_package_data = setuptools.dist:check_package_data\",\n \"include_package_data = setuptools.dist:assert_bool\",\n \"packages = setuptools.dist:check_packages\",\n \"dependency_links = setuptools.dist:assert_string_list\",\n \"test_loader = setuptools.dist:check_importable\",\n \"test_runner = setuptools.dist:check_importable\",\n \"use_2to3 = setuptools.dist:assert_bool\",\n \"convert_2to3_doctests = setuptools.dist:assert_string_list\",\n \"use_2to3_fixers = setuptools.dist:assert_string_list\",\n \"use_2to3_exclude_fixers = setuptools.dist:assert_string_list\",\n ],\n \"egg_info.writers\": [\n \"PKG-INFO = setuptools.command.egg_info:write_pkg_info\",\n \"requires.txt = setuptools.command.egg_info:write_requirements\",\n \"entry_points.txt = setuptools.command.egg_info:write_entries\",\n \"eager_resources.txt = setuptools.command.egg_info:overwrite_arg\",\n (\n \"namespace_packages.txt = \"\n \"setuptools.command.egg_info:overwrite_arg\"\n ),\n \"top_level.txt = setuptools.command.egg_info:write_toplevel_names\",\n \"depends.txt = setuptools.command.egg_info:warn_depends_obsolete\",\n \"dependency_links.txt = setuptools.command.egg_info:overwrite_arg\",\n ],\n \"console_scripts\": list(_gen_console_scripts()),\n \"setuptools.installation\":\n ['eggsecutable = setuptools.command.easy_install:bootstrap'],\n },\n classifiers=textwrap.dedent(\"\"\"\n Development Status :: 5 - Production/Stable\n Intended Audience :: Developers\n License :: OSI Approved :: MIT License\n Operating System :: OS Independent\n Programming Language :: Python :: 2\n Programming Language :: Python :: 2.7\n Programming Language :: Python :: 3\n Programming Language :: Python :: 3.4\n Programming Language :: Python :: 3.5\n Programming Language :: Python :: 3.6\n Programming Language :: Python :: 3.7\n Topic :: Software Development :: Libraries :: Python Modules\n Topic :: System :: Archiving :: Packaging\n Topic :: System :: Systems Administration\n Topic :: Utilities\n \"\"\").strip().splitlines(),\n python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',\n extras_require={\n \"ssl:sys_platform=='win32'\": \"wincertstore==0.2\",\n \"certs\": \"certifi==2016.9.26\",\n },\n dependency_links=[\n pypi_link(\n 'certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d',\n ),\n pypi_link(\n 'wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2',\n ),\n ],\n scripts=[],\n setup_requires=[\n ] + wheel,\n)\n\nif __name__ == '__main__':\n # allow setup.py to run from another directory\n here and os.chdir(here)\n require_metadata()\n dist = setuptools.setup(**setup_params)\n", "path": "setup.py"}]} | 2,530 | 108 |
gh_patches_debug_8505 | rasdani/github-patches | git_diff | Textualize__textual-1552 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Change Clock color
Following on from #1411, perhaps the clock colour needs a wee revisit too?

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_header.py`
Content:
```
1 from __future__ import annotations
2
3 from datetime import datetime
4
5 from rich.text import Text
6
7 from ..widget import Widget
8 from ..reactive import Reactive, watch
9
10
11 class HeaderIcon(Widget):
12 """Display an 'icon' on the left of the header."""
13
14 DEFAULT_CSS = """
15 HeaderIcon {
16 dock: left;
17 padding: 0 1;
18 width: 8;
19 content-align: left middle;
20 }
21 """
22 icon = Reactive("⭘")
23
24 def render(self):
25 return self.icon
26
27
28 class HeaderClockSpace(Widget):
29 """The space taken up by the clock on the right of the header."""
30
31 DEFAULT_CSS = """
32 HeaderClockSpace {
33 dock: right;
34 width: 10;
35 padding: 0 1;
36 }
37 """
38
39 def render(self) -> str:
40 return ""
41
42
43 class HeaderClock(HeaderClockSpace):
44 """Display a clock on the right of the header."""
45
46 DEFAULT_CSS = """
47 HeaderClock {
48 background: $secondary-background-lighten-1;
49 color: $text;
50 text-opacity: 85%;
51 content-align: center middle;
52 }
53 """
54
55 def on_mount(self) -> None:
56 self.set_interval(1, callback=self.refresh, name=f"update header clock")
57
58 def render(self):
59 return Text(datetime.now().time().strftime("%X"))
60
61
62 class HeaderTitle(Widget):
63 """Display the title / subtitle in the header."""
64
65 DEFAULT_CSS = """
66 HeaderTitle {
67 content-align: center middle;
68 width: 100%;
69 }
70 """
71
72 text: Reactive[str] = Reactive("")
73 sub_text = Reactive("")
74
75 def render(self) -> Text:
76 text = Text(self.text, no_wrap=True, overflow="ellipsis")
77 if self.sub_text:
78 text.append(" — ")
79 text.append(self.sub_text, "dim")
80 return text
81
82
83 class Header(Widget):
84 """A header widget with icon and clock.
85
86 Args:
87 show_clock (bool, optional): True if the clock should be shown on the right of the header.
88 """
89
90 DEFAULT_CSS = """
91 Header {
92 dock: top;
93 width: 100%;
94 background: $foreground 5%;
95 color: $text;
96 height: 1;
97 }
98 Header.-tall {
99 height: 3;
100 }
101 """
102
103 tall = Reactive(False)
104
105 DEFAULT_CLASSES = ""
106
107 def __init__(
108 self,
109 show_clock: bool = False,
110 *,
111 name: str | None = None,
112 id: str | None = None,
113 classes: str | None = None,
114 ):
115 super().__init__(name=name, id=id, classes=classes)
116 self.show_clock = show_clock
117
118 def compose(self):
119 yield HeaderIcon()
120 yield HeaderTitle()
121 yield HeaderClock() if self.show_clock else HeaderClockSpace()
122
123 def watch_tall(self, tall: bool) -> None:
124 self.set_class(tall, "-tall")
125
126 def on_click(self):
127 self.toggle_class("-tall")
128
129 def on_mount(self) -> None:
130 def set_title(title: str) -> None:
131 self.query_one(HeaderTitle).text = title
132
133 def set_sub_title(sub_title: str) -> None:
134 self.query_one(HeaderTitle).sub_text = sub_title
135
136 watch(self.app, "title", set_title)
137 watch(self.app, "sub_title", set_sub_title)
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/widgets/_header.py b/src/textual/widgets/_header.py
--- a/src/textual/widgets/_header.py
+++ b/src/textual/widgets/_header.py
@@ -45,7 +45,7 @@
DEFAULT_CSS = """
HeaderClock {
- background: $secondary-background-lighten-1;
+ background: $foreground-darken-1 5%;
color: $text;
text-opacity: 85%;
content-align: center middle;
@@ -97,7 +97,7 @@
}
Header.-tall {
height: 3;
- }
+ }
"""
tall = Reactive(False)
| {"golden_diff": "diff --git a/src/textual/widgets/_header.py b/src/textual/widgets/_header.py\n--- a/src/textual/widgets/_header.py\n+++ b/src/textual/widgets/_header.py\n@@ -45,7 +45,7 @@\n \n DEFAULT_CSS = \"\"\"\n HeaderClock {\n- background: $secondary-background-lighten-1;\n+ background: $foreground-darken-1 5%;\n color: $text;\n text-opacity: 85%;\n content-align: center middle;\n@@ -97,7 +97,7 @@\n }\n Header.-tall {\n height: 3;\n- } \n+ }\n \"\"\"\n \n tall = Reactive(False)\n", "issue": "Change Clock color\nFollowing on from #1411, perhaps the clock colour needs a wee revisit too?\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom datetime import datetime\n\nfrom rich.text import Text\n\nfrom ..widget import Widget\nfrom ..reactive import Reactive, watch\n\n\nclass HeaderIcon(Widget):\n \"\"\"Display an 'icon' on the left of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderIcon {\n dock: left;\n padding: 0 1;\n width: 8;\n content-align: left middle;\n }\n \"\"\"\n icon = Reactive(\"\u2b58\")\n\n def render(self):\n return self.icon\n\n\nclass HeaderClockSpace(Widget):\n \"\"\"The space taken up by the clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClockSpace {\n dock: right;\n width: 10;\n padding: 0 1;\n }\n \"\"\"\n\n def render(self) -> str:\n return \"\"\n\n\nclass HeaderClock(HeaderClockSpace):\n \"\"\"Display a clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClock {\n background: $secondary-background-lighten-1;\n color: $text;\n text-opacity: 85%;\n content-align: center middle;\n }\n \"\"\"\n\n def on_mount(self) -> None:\n self.set_interval(1, callback=self.refresh, name=f\"update header clock\")\n\n def render(self):\n return Text(datetime.now().time().strftime(\"%X\"))\n\n\nclass HeaderTitle(Widget):\n \"\"\"Display the title / subtitle in the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderTitle {\n content-align: center middle;\n width: 100%;\n }\n \"\"\"\n\n text: Reactive[str] = Reactive(\"\")\n sub_text = Reactive(\"\")\n\n def render(self) -> Text:\n text = Text(self.text, no_wrap=True, overflow=\"ellipsis\")\n if self.sub_text:\n text.append(\" \u2014 \")\n text.append(self.sub_text, \"dim\")\n return text\n\n\nclass Header(Widget):\n \"\"\"A header widget with icon and clock.\n\n Args:\n show_clock (bool, optional): True if the clock should be shown on the right of the header.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Header {\n dock: top;\n width: 100%;\n background: $foreground 5%;\n color: $text;\n height: 1;\n }\n Header.-tall {\n height: 3;\n } \n \"\"\"\n\n tall = Reactive(False)\n\n DEFAULT_CLASSES = \"\"\n\n def __init__(\n self,\n show_clock: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n super().__init__(name=name, id=id, classes=classes)\n self.show_clock = show_clock\n\n def compose(self):\n yield HeaderIcon()\n yield HeaderTitle()\n yield HeaderClock() if self.show_clock else HeaderClockSpace()\n\n def watch_tall(self, tall: bool) -> None:\n self.set_class(tall, \"-tall\")\n\n def on_click(self):\n self.toggle_class(\"-tall\")\n\n def on_mount(self) -> None:\n def set_title(title: str) -> None:\n self.query_one(HeaderTitle).text = title\n\n def set_sub_title(sub_title: str) -> None:\n self.query_one(HeaderTitle).sub_text = sub_title\n\n watch(self.app, \"title\", set_title)\n watch(self.app, \"sub_title\", set_sub_title)\n", "path": "src/textual/widgets/_header.py"}], "after_files": [{"content": "from 
__future__ import annotations\n\nfrom datetime import datetime\n\nfrom rich.text import Text\n\nfrom ..widget import Widget\nfrom ..reactive import Reactive, watch\n\n\nclass HeaderIcon(Widget):\n \"\"\"Display an 'icon' on the left of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderIcon {\n dock: left;\n padding: 0 1;\n width: 8;\n content-align: left middle;\n }\n \"\"\"\n icon = Reactive(\"\u2b58\")\n\n def render(self):\n return self.icon\n\n\nclass HeaderClockSpace(Widget):\n \"\"\"The space taken up by the clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClockSpace {\n dock: right;\n width: 10;\n padding: 0 1;\n }\n \"\"\"\n\n def render(self) -> str:\n return \"\"\n\n\nclass HeaderClock(HeaderClockSpace):\n \"\"\"Display a clock on the right of the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderClock {\n background: $foreground-darken-1 5%;\n color: $text;\n text-opacity: 85%;\n content-align: center middle;\n }\n \"\"\"\n\n def on_mount(self) -> None:\n self.set_interval(1, callback=self.refresh, name=f\"update header clock\")\n\n def render(self):\n return Text(datetime.now().time().strftime(\"%X\"))\n\n\nclass HeaderTitle(Widget):\n \"\"\"Display the title / subtitle in the header.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HeaderTitle {\n content-align: center middle;\n width: 100%;\n }\n \"\"\"\n\n text: Reactive[str] = Reactive(\"\")\n sub_text = Reactive(\"\")\n\n def render(self) -> Text:\n text = Text(self.text, no_wrap=True, overflow=\"ellipsis\")\n if self.sub_text:\n text.append(\" \u2014 \")\n text.append(self.sub_text, \"dim\")\n return text\n\n\nclass Header(Widget):\n \"\"\"A header widget with icon and clock.\n\n Args:\n show_clock (bool, optional): True if the clock should be shown on the right of the header.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n Header {\n dock: top;\n width: 100%;\n background: $foreground 5%;\n color: $text;\n height: 1;\n }\n Header.-tall {\n height: 3;\n }\n \"\"\"\n\n tall = Reactive(False)\n\n DEFAULT_CLASSES = \"\"\n\n def __init__(\n self,\n show_clock: bool = False,\n *,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ):\n super().__init__(name=name, id=id, classes=classes)\n self.show_clock = show_clock\n\n def compose(self):\n yield HeaderIcon()\n yield HeaderTitle()\n yield HeaderClock() if self.show_clock else HeaderClockSpace()\n\n def watch_tall(self, tall: bool) -> None:\n self.set_class(tall, \"-tall\")\n\n def on_click(self):\n self.toggle_class(\"-tall\")\n\n def on_mount(self) -> None:\n def set_title(title: str) -> None:\n self.query_one(HeaderTitle).text = title\n\n def set_sub_title(sub_title: str) -> None:\n self.query_one(HeaderTitle).sub_text = sub_title\n\n watch(self.app, \"title\", set_title)\n watch(self.app, \"sub_title\", set_sub_title)\n", "path": "src/textual/widgets/_header.py"}]} | 1,438 | 150 |
gh_patches_debug_642 | rasdani/github-patches | git_diff | pex-tool__pex-2062 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.123
On the docket:
+ [x] Create lockfile for xmlsec fails #2063
+ [x] Internal not enough values to unpack error for pex3 lock create 'pip @ https://github.com/pypa/pip/archive/22.0.2.zip' ... #2057
+ [x] Pex lock creation does not handle wheels with non {cp,pp,py} pyver tag. #2059
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.122"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.122"
+__version__ = "2.1.123"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.122\"\n+__version__ = \"2.1.123\"\n", "issue": "Release 2.1.123\nOn the docket:\r\n+ [x] Create lockfile for xmlsec fails #2063\r\n+ [x] Internal not enough values to unpack error for pex3 lock create 'pip @ https://github.com/pypa/pip/archive/22.0.2.zip' ... #2057\r\n+ [x] Pex lock creation does not handle wheels with non {cp,pp,py} pyver tag. #2059\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.122\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.123\"\n", "path": "pex/version.py"}]} | 417 | 98 |
gh_patches_debug_39170 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1134 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Instrument SQLAlchemy engine connection phase
**Is your feature request related to a problem?**
The SQLAlchemy instrumentation does not trace the actual connection to the database.
**Describe the solution you'd like**
I want the `connect` call to be traced as well.
**Describe alternatives you've considered**
Which alternative solutions or features have you considered?
**Additional context**
We are working with SQLAlchemy (Snowflake DB) and we implemented a solution where we can also see the `connect` span, as shown in the attached screenshot (the span is `database-connect`).

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Instrument `sqlalchemy`_ to report SQL queries.
17
18 There are two options for instrumenting code. The first option is to use
19 the ``opentelemetry-instrument`` executable which will automatically
20 instrument your SQLAlchemy engine. The second is to programmatically enable
21 instrumentation via the following code:
22
23 .. _sqlalchemy: https://pypi.org/project/sqlalchemy/
24
25 Usage
26 -----
27 .. code:: python
28
29 from sqlalchemy import create_engine
30
31 from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor
32 import sqlalchemy
33
34 engine = create_engine("sqlite:///:memory:")
35 SQLAlchemyInstrumentor().instrument(
36 engine=engine,
37 )
38
39 # of the async variant of SQLAlchemy
40
41 from sqlalchemy.ext.asyncio import create_async_engine
42
43 from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor
44 import sqlalchemy
45
46 engine = create_async_engine("sqlite:///:memory:")
47 SQLAlchemyInstrumentor().instrument(
48 engine=engine.sync_engine
49 )
50
51 API
52 ---
53 """
54 from collections.abc import Sequence
55 from typing import Collection
56
57 import sqlalchemy
58 from packaging.version import parse as parse_version
59 from wrapt import wrap_function_wrapper as _w
60
61 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
62 from opentelemetry.instrumentation.sqlalchemy.engine import (
63 EngineTracer,
64 _get_tracer,
65 _wrap_create_async_engine,
66 _wrap_create_engine,
67 )
68 from opentelemetry.instrumentation.sqlalchemy.package import _instruments
69 from opentelemetry.instrumentation.utils import unwrap
70
71
72 class SQLAlchemyInstrumentor(BaseInstrumentor):
73 """An instrumentor for SQLAlchemy
74 See `BaseInstrumentor`
75 """
76
77 def instrumentation_dependencies(self) -> Collection[str]:
78 return _instruments
79
80 def _instrument(self, **kwargs):
81 """Instruments SQLAlchemy engine creation methods and the engine
82 if passed as an argument.
83
84 Args:
85 **kwargs: Optional arguments
86 ``engine``: a SQLAlchemy engine instance
87 ``engines``: a list of SQLAlchemy engine instances
88 ``tracer_provider``: a TracerProvider, defaults to global
89
90 Returns:
91 An instrumented engine if passed in as an argument or list of instrumented engines, None otherwise.
92 """
93 tracer_provider = kwargs.get("tracer_provider")
94 _w("sqlalchemy", "create_engine", _wrap_create_engine(tracer_provider))
95 _w(
96 "sqlalchemy.engine",
97 "create_engine",
98 _wrap_create_engine(tracer_provider),
99 )
100 if parse_version(sqlalchemy.__version__).release >= (1, 4):
101 _w(
102 "sqlalchemy.ext.asyncio",
103 "create_async_engine",
104 _wrap_create_async_engine(tracer_provider),
105 )
106
107 if kwargs.get("engine") is not None:
108 return EngineTracer(
109 _get_tracer(tracer_provider),
110 kwargs.get("engine"),
111 kwargs.get("enable_commenter", False),
112 )
113 if kwargs.get("engines") is not None and isinstance(
114 kwargs.get("engines"), Sequence
115 ):
116 return [
117 EngineTracer(
118 _get_tracer(tracer_provider),
119 engine,
120 kwargs.get("enable_commenter", False),
121 )
122 for engine in kwargs.get("engines")
123 ]
124
125 return None
126
127 def _uninstrument(self, **kwargs):
128 unwrap(sqlalchemy, "create_engine")
129 unwrap(sqlalchemy.engine, "create_engine")
130 if parse_version(sqlalchemy.__version__).release >= (1, 4):
131 unwrap(sqlalchemy.ext.asyncio, "create_async_engine")
132
```
Path: `instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15
16 from sqlalchemy.event import listen # pylint: disable=no-name-in-module
17
18 from opentelemetry import trace
19 from opentelemetry.instrumentation.sqlalchemy.package import (
20 _instrumenting_module_name,
21 )
22 from opentelemetry.instrumentation.sqlalchemy.version import __version__
23 from opentelemetry.instrumentation.utils import (
24 _generate_opentelemetry_traceparent,
25 _generate_sql_comment,
26 )
27 from opentelemetry.semconv.trace import NetTransportValues, SpanAttributes
28 from opentelemetry.trace import Span
29 from opentelemetry.trace.status import Status, StatusCode
30
31
32 def _normalize_vendor(vendor):
33 """Return a canonical name for a type of database."""
34 if not vendor:
35 return "db" # should this ever happen?
36
37 if "sqlite" in vendor:
38 return "sqlite"
39
40 if "postgres" in vendor or vendor == "psycopg2":
41 return "postgresql"
42
43 return vendor
44
45
46 def _get_tracer(tracer_provider=None):
47 return trace.get_tracer(
48 _instrumenting_module_name,
49 __version__,
50 tracer_provider=tracer_provider,
51 )
52
53
54 def _wrap_create_async_engine(tracer_provider=None):
55 # pylint: disable=unused-argument
56 def _wrap_create_async_engine_internal(func, module, args, kwargs):
57 """Trace the SQLAlchemy engine, creating an `EngineTracer`
58 object that will listen to SQLAlchemy events.
59 """
60 engine = func(*args, **kwargs)
61 EngineTracer(_get_tracer(tracer_provider), engine.sync_engine)
62 return engine
63
64 return _wrap_create_async_engine_internal
65
66
67 def _wrap_create_engine(tracer_provider=None):
68 # pylint: disable=unused-argument
69 def _wrap_create_engine_internal(func, module, args, kwargs):
70 """Trace the SQLAlchemy engine, creating an `EngineTracer`
71 object that will listen to SQLAlchemy events.
72 """
73 engine = func(*args, **kwargs)
74 EngineTracer(_get_tracer(tracer_provider), engine)
75 return engine
76
77 return _wrap_create_engine_internal
78
79
80 class EngineTracer:
81 def __init__(self, tracer, engine, enable_commenter=False):
82 self.tracer = tracer
83 self.engine = engine
84 self.vendor = _normalize_vendor(engine.name)
85 self.enable_commenter = enable_commenter
86
87 listen(
88 engine, "before_cursor_execute", self._before_cur_exec, retval=True
89 )
90 listen(engine, "after_cursor_execute", _after_cur_exec)
91 listen(engine, "handle_error", _handle_error)
92
93 def _operation_name(self, db_name, statement):
94 parts = []
95 if isinstance(statement, str):
96 # otel spec recommends against parsing SQL queries. We are not trying to parse SQL
97 # but simply truncating the statement to the first word. This covers probably >95%
98 # use cases and uses the SQL statement in span name correctly as per the spec.
99 # For some very special cases it might not record the correct statement if the SQL
100 # dialect is too weird but in any case it shouldn't break anything.
101 parts.append(statement.split()[0])
102 if db_name:
103 parts.append(db_name)
104 if not parts:
105 return self.vendor
106 return " ".join(parts)
107
108 # pylint: disable=unused-argument
109 def _before_cur_exec(
110 self, conn, cursor, statement, params, context, executemany
111 ):
112 attrs, found = _get_attributes_from_url(conn.engine.url)
113 if not found:
114 attrs = _get_attributes_from_cursor(self.vendor, cursor, attrs)
115
116 db_name = attrs.get(SpanAttributes.DB_NAME, "")
117 span = self.tracer.start_span(
118 self._operation_name(db_name, statement),
119 kind=trace.SpanKind.CLIENT,
120 )
121 with trace.use_span(span, end_on_exit=False):
122 if span.is_recording():
123 span.set_attribute(SpanAttributes.DB_STATEMENT, statement)
124 span.set_attribute(SpanAttributes.DB_SYSTEM, self.vendor)
125 for key, value in attrs.items():
126 span.set_attribute(key, value)
127
128 context._otel_span = span
129 if self.enable_commenter:
130 statement = statement + EngineTracer._generate_comment(span=span)
131
132 return statement, params
133
134 @staticmethod
135 def _generate_comment(span: Span) -> str:
136 span_context = span.get_span_context()
137 meta = {}
138 if span_context.is_valid:
139 meta.update(_generate_opentelemetry_traceparent(span))
140 return _generate_sql_comment(**meta)
141
142
143 # pylint: disable=unused-argument
144 def _after_cur_exec(conn, cursor, statement, params, context, executemany):
145 span = getattr(context, "_otel_span", None)
146 if span is None:
147 return
148
149 span.end()
150
151
152 def _handle_error(context):
153 span = getattr(context.execution_context, "_otel_span", None)
154 if span is None:
155 return
156
157 if span.is_recording():
158 span.set_status(
159 Status(
160 StatusCode.ERROR,
161 str(context.original_exception),
162 )
163 )
164 span.end()
165
166
167 def _get_attributes_from_url(url):
168 """Set connection tags from the url. return true if successful."""
169 attrs = {}
170 if url.host:
171 attrs[SpanAttributes.NET_PEER_NAME] = url.host
172 if url.port:
173 attrs[SpanAttributes.NET_PEER_PORT] = url.port
174 if url.database:
175 attrs[SpanAttributes.DB_NAME] = url.database
176 if url.username:
177 attrs[SpanAttributes.DB_USER] = url.username
178 return attrs, bool(url.host)
179
180
181 def _get_attributes_from_cursor(vendor, cursor, attrs):
182 """Attempt to set db connection attributes by introspecting the cursor."""
183 if vendor == "postgresql":
184 info = getattr(getattr(cursor, "connection", None), "info", None)
185 if not info:
186 return attrs
187
188 attrs[SpanAttributes.DB_NAME] = info.dbname
189 is_unix_socket = info.host and info.host.startswith("/")
190
191 if is_unix_socket:
192 attrs[SpanAttributes.NET_TRANSPORT] = NetTransportValues.UNIX.value
193 if info.port:
194 # postgresql enforces this pattern on all socket names
195 attrs[SpanAttributes.NET_PEER_NAME] = os.path.join(
196 info.host, f".s.PGSQL.{info.port}"
197 )
198 else:
199 attrs[
200 SpanAttributes.NET_TRANSPORT
201 ] = NetTransportValues.IP_TCP.value
202 attrs[SpanAttributes.NET_PEER_NAME] = info.host
203 if info.port:
204 attrs[SpanAttributes.NET_PEER_PORT] = int(info.port)
205 return attrs
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py
@@ -56,12 +56,14 @@
import sqlalchemy
from packaging.version import parse as parse_version
+from sqlalchemy.engine.base import Engine
from wrapt import wrap_function_wrapper as _w
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.sqlalchemy.engine import (
EngineTracer,
_get_tracer,
+ _wrap_connect,
_wrap_create_async_engine,
_wrap_create_engine,
)
@@ -97,13 +99,17 @@
"create_engine",
_wrap_create_engine(tracer_provider),
)
+ _w(
+ "sqlalchemy.engine.base",
+ "Engine.connect",
+ _wrap_connect(tracer_provider),
+ )
if parse_version(sqlalchemy.__version__).release >= (1, 4):
_w(
"sqlalchemy.ext.asyncio",
"create_async_engine",
_wrap_create_async_engine(tracer_provider),
)
-
if kwargs.get("engine") is not None:
return EngineTracer(
_get_tracer(tracer_provider),
@@ -127,5 +133,6 @@
def _uninstrument(self, **kwargs):
unwrap(sqlalchemy, "create_engine")
unwrap(sqlalchemy.engine, "create_engine")
+ unwrap(Engine, "connect")
if parse_version(sqlalchemy.__version__).release >= (1, 4):
unwrap(sqlalchemy.ext.asyncio, "create_async_engine")
diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py
--- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py
+++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py
@@ -77,6 +77,23 @@
return _wrap_create_engine_internal
+def _wrap_connect(tracer_provider=None):
+ tracer = trace.get_tracer(
+ _instrumenting_module_name,
+ __version__,
+ tracer_provider=tracer_provider,
+ )
+
+ # pylint: disable=unused-argument
+ def _wrap_connect_internal(func, module, args, kwargs):
+ with tracer.start_as_current_span(
+ "connect", kind=trace.SpanKind.CLIENT
+ ):
+ return func(*args, **kwargs)
+
+ return _wrap_connect_internal
+
+
class EngineTracer:
def __init__(self, tracer, engine, enable_commenter=False):
self.tracer = tracer
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py\n@@ -56,12 +56,14 @@\n \n import sqlalchemy\n from packaging.version import parse as parse_version\n+from sqlalchemy.engine.base import Engine\n from wrapt import wrap_function_wrapper as _w\n \n from opentelemetry.instrumentation.instrumentor import BaseInstrumentor\n from opentelemetry.instrumentation.sqlalchemy.engine import (\n EngineTracer,\n _get_tracer,\n+ _wrap_connect,\n _wrap_create_async_engine,\n _wrap_create_engine,\n )\n@@ -97,13 +99,17 @@\n \"create_engine\",\n _wrap_create_engine(tracer_provider),\n )\n+ _w(\n+ \"sqlalchemy.engine.base\",\n+ \"Engine.connect\",\n+ _wrap_connect(tracer_provider),\n+ )\n if parse_version(sqlalchemy.__version__).release >= (1, 4):\n _w(\n \"sqlalchemy.ext.asyncio\",\n \"create_async_engine\",\n _wrap_create_async_engine(tracer_provider),\n )\n-\n if kwargs.get(\"engine\") is not None:\n return EngineTracer(\n _get_tracer(tracer_provider),\n@@ -127,5 +133,6 @@\n def _uninstrument(self, **kwargs):\n unwrap(sqlalchemy, \"create_engine\")\n unwrap(sqlalchemy.engine, \"create_engine\")\n+ unwrap(Engine, \"connect\")\n if parse_version(sqlalchemy.__version__).release >= (1, 4):\n unwrap(sqlalchemy.ext.asyncio, \"create_async_engine\")\ndiff --git a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py\n--- a/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py\n+++ b/instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py\n@@ -77,6 +77,23 @@\n return _wrap_create_engine_internal\n \n \n+def _wrap_connect(tracer_provider=None):\n+ tracer = trace.get_tracer(\n+ _instrumenting_module_name,\n+ __version__,\n+ tracer_provider=tracer_provider,\n+ )\n+\n+ # pylint: disable=unused-argument\n+ def _wrap_connect_internal(func, module, args, kwargs):\n+ with tracer.start_as_current_span(\n+ \"connect\", kind=trace.SpanKind.CLIENT\n+ ):\n+ return func(*args, **kwargs)\n+\n+ return _wrap_connect_internal\n+\n+\n class EngineTracer:\n def __init__(self, tracer, engine, enable_commenter=False):\n self.tracer = tracer\n", "issue": "Instrument SQLAlchemy engine connection phase\n** Is your feature request related to a problem? 
**\r\nThe SQLAlchemy instrumentation does not trace the actual connection to the database\r\n\r\n**Describe the solution you'd like**\r\nI want that `connect` function will also be traced\r\n\r\n**Describe alternatives you've considered**\r\nWhich alternative solutions or features have you considered?\r\n\r\n**Additional context**\r\nWe are working with SQLAlchemy (snowflake db) and we implemented a solution where we can see the `connect` span also as attached in the screenshot (the span is `database-connect`)\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nInstrument `sqlalchemy`_ to report SQL queries.\n\nThere are two options for instrumenting code. The first option is to use\nthe ``opentelemetry-instrument`` executable which will automatically\ninstrument your SQLAlchemy engine. The second is to programmatically enable\ninstrumentation via the following code:\n\n.. _sqlalchemy: https://pypi.org/project/sqlalchemy/\n\nUsage\n-----\n.. code:: python\n\n from sqlalchemy import create_engine\n\n from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor\n import sqlalchemy\n\n engine = create_engine(\"sqlite:///:memory:\")\n SQLAlchemyInstrumentor().instrument(\n engine=engine,\n )\n\n # of the async variant of SQLAlchemy\n\n from sqlalchemy.ext.asyncio import create_async_engine\n\n from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor\n import sqlalchemy\n\n engine = create_async_engine(\"sqlite:///:memory:\")\n SQLAlchemyInstrumentor().instrument(\n engine=engine.sync_engine\n )\n\nAPI\n---\n\"\"\"\nfrom collections.abc import Sequence\nfrom typing import Collection\n\nimport sqlalchemy\nfrom packaging.version import parse as parse_version\nfrom wrapt import wrap_function_wrapper as _w\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.sqlalchemy.engine import (\n EngineTracer,\n _get_tracer,\n _wrap_create_async_engine,\n _wrap_create_engine,\n)\nfrom opentelemetry.instrumentation.sqlalchemy.package import _instruments\nfrom opentelemetry.instrumentation.utils import unwrap\n\n\nclass SQLAlchemyInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for SQLAlchemy\n See `BaseInstrumentor`\n \"\"\"\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"Instruments SQLAlchemy engine creation methods and the engine\n if passed as an argument.\n\n Args:\n **kwargs: Optional arguments\n ``engine``: a SQLAlchemy engine instance\n ``engines``: a list of SQLAlchemy engine instances\n ``tracer_provider``: a TracerProvider, defaults to global\n\n Returns:\n An instrumented engine if passed in as an argument or list of instrumented engines, None otherwise.\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n _w(\"sqlalchemy\", \"create_engine\", _wrap_create_engine(tracer_provider))\n _w(\n \"sqlalchemy.engine\",\n 
\"create_engine\",\n _wrap_create_engine(tracer_provider),\n )\n if parse_version(sqlalchemy.__version__).release >= (1, 4):\n _w(\n \"sqlalchemy.ext.asyncio\",\n \"create_async_engine\",\n _wrap_create_async_engine(tracer_provider),\n )\n\n if kwargs.get(\"engine\") is not None:\n return EngineTracer(\n _get_tracer(tracer_provider),\n kwargs.get(\"engine\"),\n kwargs.get(\"enable_commenter\", False),\n )\n if kwargs.get(\"engines\") is not None and isinstance(\n kwargs.get(\"engines\"), Sequence\n ):\n return [\n EngineTracer(\n _get_tracer(tracer_provider),\n engine,\n kwargs.get(\"enable_commenter\", False),\n )\n for engine in kwargs.get(\"engines\")\n ]\n\n return None\n\n def _uninstrument(self, **kwargs):\n unwrap(sqlalchemy, \"create_engine\")\n unwrap(sqlalchemy.engine, \"create_engine\")\n if parse_version(sqlalchemy.__version__).release >= (1, 4):\n unwrap(sqlalchemy.ext.asyncio, \"create_async_engine\")\n", "path": "instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nfrom sqlalchemy.event import listen # pylint: disable=no-name-in-module\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.sqlalchemy.package import (\n _instrumenting_module_name,\n)\nfrom opentelemetry.instrumentation.sqlalchemy.version import __version__\nfrom opentelemetry.instrumentation.utils import (\n _generate_opentelemetry_traceparent,\n _generate_sql_comment,\n)\nfrom opentelemetry.semconv.trace import NetTransportValues, SpanAttributes\nfrom opentelemetry.trace import Span\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\ndef _normalize_vendor(vendor):\n \"\"\"Return a canonical name for a type of database.\"\"\"\n if not vendor:\n return \"db\" # should this ever happen?\n\n if \"sqlite\" in vendor:\n return \"sqlite\"\n\n if \"postgres\" in vendor or vendor == \"psycopg2\":\n return \"postgresql\"\n\n return vendor\n\n\ndef _get_tracer(tracer_provider=None):\n return trace.get_tracer(\n _instrumenting_module_name,\n __version__,\n tracer_provider=tracer_provider,\n )\n\n\ndef _wrap_create_async_engine(tracer_provider=None):\n # pylint: disable=unused-argument\n def _wrap_create_async_engine_internal(func, module, args, kwargs):\n \"\"\"Trace the SQLAlchemy engine, creating an `EngineTracer`\n object that will listen to SQLAlchemy events.\n \"\"\"\n engine = func(*args, **kwargs)\n EngineTracer(_get_tracer(tracer_provider), engine.sync_engine)\n return engine\n\n return _wrap_create_async_engine_internal\n\n\ndef _wrap_create_engine(tracer_provider=None):\n # pylint: disable=unused-argument\n def _wrap_create_engine_internal(func, module, args, kwargs):\n \"\"\"Trace the SQLAlchemy engine, creating an `EngineTracer`\n object that will listen to SQLAlchemy events.\n \"\"\"\n engine = func(*args, **kwargs)\n EngineTracer(_get_tracer(tracer_provider), engine)\n return engine\n\n return 
_wrap_create_engine_internal\n\n\nclass EngineTracer:\n def __init__(self, tracer, engine, enable_commenter=False):\n self.tracer = tracer\n self.engine = engine\n self.vendor = _normalize_vendor(engine.name)\n self.enable_commenter = enable_commenter\n\n listen(\n engine, \"before_cursor_execute\", self._before_cur_exec, retval=True\n )\n listen(engine, \"after_cursor_execute\", _after_cur_exec)\n listen(engine, \"handle_error\", _handle_error)\n\n def _operation_name(self, db_name, statement):\n parts = []\n if isinstance(statement, str):\n # otel spec recommends against parsing SQL queries. We are not trying to parse SQL\n # but simply truncating the statement to the first word. This covers probably >95%\n # use cases and uses the SQL statement in span name correctly as per the spec.\n # For some very special cases it might not record the correct statement if the SQL\n # dialect is too weird but in any case it shouldn't break anything.\n parts.append(statement.split()[0])\n if db_name:\n parts.append(db_name)\n if not parts:\n return self.vendor\n return \" \".join(parts)\n\n # pylint: disable=unused-argument\n def _before_cur_exec(\n self, conn, cursor, statement, params, context, executemany\n ):\n attrs, found = _get_attributes_from_url(conn.engine.url)\n if not found:\n attrs = _get_attributes_from_cursor(self.vendor, cursor, attrs)\n\n db_name = attrs.get(SpanAttributes.DB_NAME, \"\")\n span = self.tracer.start_span(\n self._operation_name(db_name, statement),\n kind=trace.SpanKind.CLIENT,\n )\n with trace.use_span(span, end_on_exit=False):\n if span.is_recording():\n span.set_attribute(SpanAttributes.DB_STATEMENT, statement)\n span.set_attribute(SpanAttributes.DB_SYSTEM, self.vendor)\n for key, value in attrs.items():\n span.set_attribute(key, value)\n\n context._otel_span = span\n if self.enable_commenter:\n statement = statement + EngineTracer._generate_comment(span=span)\n\n return statement, params\n\n @staticmethod\n def _generate_comment(span: Span) -> str:\n span_context = span.get_span_context()\n meta = {}\n if span_context.is_valid:\n meta.update(_generate_opentelemetry_traceparent(span))\n return _generate_sql_comment(**meta)\n\n\n# pylint: disable=unused-argument\ndef _after_cur_exec(conn, cursor, statement, params, context, executemany):\n span = getattr(context, \"_otel_span\", None)\n if span is None:\n return\n\n span.end()\n\n\ndef _handle_error(context):\n span = getattr(context.execution_context, \"_otel_span\", None)\n if span is None:\n return\n\n if span.is_recording():\n span.set_status(\n Status(\n StatusCode.ERROR,\n str(context.original_exception),\n )\n )\n span.end()\n\n\ndef _get_attributes_from_url(url):\n \"\"\"Set connection tags from the url. 
return true if successful.\"\"\"\n attrs = {}\n if url.host:\n attrs[SpanAttributes.NET_PEER_NAME] = url.host\n if url.port:\n attrs[SpanAttributes.NET_PEER_PORT] = url.port\n if url.database:\n attrs[SpanAttributes.DB_NAME] = url.database\n if url.username:\n attrs[SpanAttributes.DB_USER] = url.username\n return attrs, bool(url.host)\n\n\ndef _get_attributes_from_cursor(vendor, cursor, attrs):\n \"\"\"Attempt to set db connection attributes by introspecting the cursor.\"\"\"\n if vendor == \"postgresql\":\n info = getattr(getattr(cursor, \"connection\", None), \"info\", None)\n if not info:\n return attrs\n\n attrs[SpanAttributes.DB_NAME] = info.dbname\n is_unix_socket = info.host and info.host.startswith(\"/\")\n\n if is_unix_socket:\n attrs[SpanAttributes.NET_TRANSPORT] = NetTransportValues.UNIX.value\n if info.port:\n # postgresql enforces this pattern on all socket names\n attrs[SpanAttributes.NET_PEER_NAME] = os.path.join(\n info.host, f\".s.PGSQL.{info.port}\"\n )\n else:\n attrs[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.IP_TCP.value\n attrs[SpanAttributes.NET_PEER_NAME] = info.host\n if info.port:\n attrs[SpanAttributes.NET_PEER_PORT] = int(info.port)\n return attrs\n", "path": "instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nInstrument `sqlalchemy`_ to report SQL queries.\n\nThere are two options for instrumenting code. The first option is to use\nthe ``opentelemetry-instrument`` executable which will automatically\ninstrument your SQLAlchemy engine. The second is to programmatically enable\ninstrumentation via the following code:\n\n.. _sqlalchemy: https://pypi.org/project/sqlalchemy/\n\nUsage\n-----\n.. 
code:: python\n\n from sqlalchemy import create_engine\n\n from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor\n import sqlalchemy\n\n engine = create_engine(\"sqlite:///:memory:\")\n SQLAlchemyInstrumentor().instrument(\n engine=engine,\n )\n\n # of the async variant of SQLAlchemy\n\n from sqlalchemy.ext.asyncio import create_async_engine\n\n from opentelemetry.instrumentation.sqlalchemy import SQLAlchemyInstrumentor\n import sqlalchemy\n\n engine = create_async_engine(\"sqlite:///:memory:\")\n SQLAlchemyInstrumentor().instrument(\n engine=engine.sync_engine\n )\n\nAPI\n---\n\"\"\"\nfrom collections.abc import Sequence\nfrom typing import Collection\n\nimport sqlalchemy\nfrom packaging.version import parse as parse_version\nfrom sqlalchemy.engine.base import Engine\nfrom wrapt import wrap_function_wrapper as _w\n\nfrom opentelemetry.instrumentation.instrumentor import BaseInstrumentor\nfrom opentelemetry.instrumentation.sqlalchemy.engine import (\n EngineTracer,\n _get_tracer,\n _wrap_connect,\n _wrap_create_async_engine,\n _wrap_create_engine,\n)\nfrom opentelemetry.instrumentation.sqlalchemy.package import _instruments\nfrom opentelemetry.instrumentation.utils import unwrap\n\n\nclass SQLAlchemyInstrumentor(BaseInstrumentor):\n \"\"\"An instrumentor for SQLAlchemy\n See `BaseInstrumentor`\n \"\"\"\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return _instruments\n\n def _instrument(self, **kwargs):\n \"\"\"Instruments SQLAlchemy engine creation methods and the engine\n if passed as an argument.\n\n Args:\n **kwargs: Optional arguments\n ``engine``: a SQLAlchemy engine instance\n ``engines``: a list of SQLAlchemy engine instances\n ``tracer_provider``: a TracerProvider, defaults to global\n\n Returns:\n An instrumented engine if passed in as an argument or list of instrumented engines, None otherwise.\n \"\"\"\n tracer_provider = kwargs.get(\"tracer_provider\")\n _w(\"sqlalchemy\", \"create_engine\", _wrap_create_engine(tracer_provider))\n _w(\n \"sqlalchemy.engine\",\n \"create_engine\",\n _wrap_create_engine(tracer_provider),\n )\n _w(\n \"sqlalchemy.engine.base\",\n \"Engine.connect\",\n _wrap_connect(tracer_provider),\n )\n if parse_version(sqlalchemy.__version__).release >= (1, 4):\n _w(\n \"sqlalchemy.ext.asyncio\",\n \"create_async_engine\",\n _wrap_create_async_engine(tracer_provider),\n )\n if kwargs.get(\"engine\") is not None:\n return EngineTracer(\n _get_tracer(tracer_provider),\n kwargs.get(\"engine\"),\n kwargs.get(\"enable_commenter\", False),\n )\n if kwargs.get(\"engines\") is not None and isinstance(\n kwargs.get(\"engines\"), Sequence\n ):\n return [\n EngineTracer(\n _get_tracer(tracer_provider),\n engine,\n kwargs.get(\"enable_commenter\", False),\n )\n for engine in kwargs.get(\"engines\")\n ]\n\n return None\n\n def _uninstrument(self, **kwargs):\n unwrap(sqlalchemy, \"create_engine\")\n unwrap(sqlalchemy.engine, \"create_engine\")\n unwrap(Engine, \"connect\")\n if parse_version(sqlalchemy.__version__).release >= (1, 4):\n unwrap(sqlalchemy.ext.asyncio, \"create_async_engine\")\n", "path": "instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/__init__.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable 
law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nfrom sqlalchemy.event import listen # pylint: disable=no-name-in-module\n\nfrom opentelemetry import trace\nfrom opentelemetry.instrumentation.sqlalchemy.package import (\n _instrumenting_module_name,\n)\nfrom opentelemetry.instrumentation.sqlalchemy.version import __version__\nfrom opentelemetry.instrumentation.utils import (\n _generate_opentelemetry_traceparent,\n _generate_sql_comment,\n)\nfrom opentelemetry.semconv.trace import NetTransportValues, SpanAttributes\nfrom opentelemetry.trace import Span\nfrom opentelemetry.trace.status import Status, StatusCode\n\n\ndef _normalize_vendor(vendor):\n \"\"\"Return a canonical name for a type of database.\"\"\"\n if not vendor:\n return \"db\" # should this ever happen?\n\n if \"sqlite\" in vendor:\n return \"sqlite\"\n\n if \"postgres\" in vendor or vendor == \"psycopg2\":\n return \"postgresql\"\n\n return vendor\n\n\ndef _get_tracer(tracer_provider=None):\n return trace.get_tracer(\n _instrumenting_module_name,\n __version__,\n tracer_provider=tracer_provider,\n )\n\n\ndef _wrap_create_async_engine(tracer_provider=None):\n # pylint: disable=unused-argument\n def _wrap_create_async_engine_internal(func, module, args, kwargs):\n \"\"\"Trace the SQLAlchemy engine, creating an `EngineTracer`\n object that will listen to SQLAlchemy events.\n \"\"\"\n engine = func(*args, **kwargs)\n EngineTracer(_get_tracer(tracer_provider), engine.sync_engine)\n return engine\n\n return _wrap_create_async_engine_internal\n\n\ndef _wrap_create_engine(tracer_provider=None):\n # pylint: disable=unused-argument\n def _wrap_create_engine_internal(func, module, args, kwargs):\n \"\"\"Trace the SQLAlchemy engine, creating an `EngineTracer`\n object that will listen to SQLAlchemy events.\n \"\"\"\n engine = func(*args, **kwargs)\n EngineTracer(_get_tracer(tracer_provider), engine)\n return engine\n\n return _wrap_create_engine_internal\n\n\ndef _wrap_connect(tracer_provider=None):\n tracer = trace.get_tracer(\n _instrumenting_module_name,\n __version__,\n tracer_provider=tracer_provider,\n )\n\n # pylint: disable=unused-argument\n def _wrap_connect_internal(func, module, args, kwargs):\n with tracer.start_as_current_span(\n \"connect\", kind=trace.SpanKind.CLIENT\n ):\n return func(*args, **kwargs)\n\n return _wrap_connect_internal\n\n\nclass EngineTracer:\n def __init__(self, tracer, engine, enable_commenter=False):\n self.tracer = tracer\n self.engine = engine\n self.vendor = _normalize_vendor(engine.name)\n self.enable_commenter = enable_commenter\n\n listen(\n engine, \"before_cursor_execute\", self._before_cur_exec, retval=True\n )\n listen(engine, \"after_cursor_execute\", _after_cur_exec)\n listen(engine, \"handle_error\", _handle_error)\n\n def _operation_name(self, db_name, statement):\n parts = []\n if isinstance(statement, str):\n # otel spec recommends against parsing SQL queries. We are not trying to parse SQL\n # but simply truncating the statement to the first word. 
This covers probably >95%\n # use cases and uses the SQL statement in span name correctly as per the spec.\n # For some very special cases it might not record the correct statement if the SQL\n # dialect is too weird but in any case it shouldn't break anything.\n parts.append(statement.split()[0])\n if db_name:\n parts.append(db_name)\n if not parts:\n return self.vendor\n return \" \".join(parts)\n\n # pylint: disable=unused-argument\n def _before_cur_exec(\n self, conn, cursor, statement, params, context, executemany\n ):\n attrs, found = _get_attributes_from_url(conn.engine.url)\n if not found:\n attrs = _get_attributes_from_cursor(self.vendor, cursor, attrs)\n\n db_name = attrs.get(SpanAttributes.DB_NAME, \"\")\n span = self.tracer.start_span(\n self._operation_name(db_name, statement),\n kind=trace.SpanKind.CLIENT,\n )\n with trace.use_span(span, end_on_exit=False):\n if span.is_recording():\n span.set_attribute(SpanAttributes.DB_STATEMENT, statement)\n span.set_attribute(SpanAttributes.DB_SYSTEM, self.vendor)\n for key, value in attrs.items():\n span.set_attribute(key, value)\n\n context._otel_span = span\n if self.enable_commenter:\n statement = statement + EngineTracer._generate_comment(span=span)\n\n return statement, params\n\n @staticmethod\n def _generate_comment(span: Span) -> str:\n span_context = span.get_span_context()\n meta = {}\n if span_context.is_valid:\n meta.update(_generate_opentelemetry_traceparent(span))\n return _generate_sql_comment(**meta)\n\n\n# pylint: disable=unused-argument\ndef _after_cur_exec(conn, cursor, statement, params, context, executemany):\n span = getattr(context, \"_otel_span\", None)\n if span is None:\n return\n\n span.end()\n\n\ndef _handle_error(context):\n span = getattr(context.execution_context, \"_otel_span\", None)\n if span is None:\n return\n\n if span.is_recording():\n span.set_status(\n Status(\n StatusCode.ERROR,\n str(context.original_exception),\n )\n )\n span.end()\n\n\ndef _get_attributes_from_url(url):\n \"\"\"Set connection tags from the url. return true if successful.\"\"\"\n attrs = {}\n if url.host:\n attrs[SpanAttributes.NET_PEER_NAME] = url.host\n if url.port:\n attrs[SpanAttributes.NET_PEER_PORT] = url.port\n if url.database:\n attrs[SpanAttributes.DB_NAME] = url.database\n if url.username:\n attrs[SpanAttributes.DB_USER] = url.username\n return attrs, bool(url.host)\n\n\ndef _get_attributes_from_cursor(vendor, cursor, attrs):\n \"\"\"Attempt to set db connection attributes by introspecting the cursor.\"\"\"\n if vendor == \"postgresql\":\n info = getattr(getattr(cursor, \"connection\", None), \"info\", None)\n if not info:\n return attrs\n\n attrs[SpanAttributes.DB_NAME] = info.dbname\n is_unix_socket = info.host and info.host.startswith(\"/\")\n\n if is_unix_socket:\n attrs[SpanAttributes.NET_TRANSPORT] = NetTransportValues.UNIX.value\n if info.port:\n # postgresql enforces this pattern on all socket names\n attrs[SpanAttributes.NET_PEER_NAME] = os.path.join(\n info.host, f\".s.PGSQL.{info.port}\"\n )\n else:\n attrs[\n SpanAttributes.NET_TRANSPORT\n ] = NetTransportValues.IP_TCP.value\n attrs[SpanAttributes.NET_PEER_NAME] = info.host\n if info.port:\n attrs[SpanAttributes.NET_PEER_PORT] = int(info.port)\n return attrs\n", "path": "instrumentation/opentelemetry-instrumentation-sqlalchemy/src/opentelemetry/instrumentation/sqlalchemy/engine.py"}]} | 3,707 | 699 |
gh_patches_debug_19154 | rasdani/github-patches | git_diff | bokeh__bokeh-6656 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError: 'unicode' does not have the buffer interface
#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)
bokeh 0.12.6
python 2.7.12
64 bit Ubuntu 16.04
Chrome browser
#### Description of expected behavior and the observed behavior
I get `500: Internal Server Error` when I try to use signed sessions with bokeh.
#### Complete, minimal, self-contained example code that reproduces the issue
I use `sliders.py` example from Bokeh Gallery. I use the following to run the app and generate the session id:
Run the app:
```bash
export BOKEH_SECRET_KEY=KKXYhtUJmQ5f8LiQJdeUGOF4HIeTzf5zOBZmlxrXtF6D
export BOKEH_SIGN_SESSIONS=True
bokeh serve sliders.py --session-ids external-signed
```
Generate session id:
```bash
export BOKEH_SECRET_KEY=KKXYhtUJmQ5f8LiQJdeUGOF4HIeTzf5zOBZmlxrXtF6D
export BOKEH_SIGN_SESSIONS=True
python -c "from bokeh.util.session_id import generate_session_id; print(generate_session_id())"
```
Example url to access app:
http://localhost:5006/sliders?bokeh-session-id=4RJKVrnFVe60gB5urh9sE3jUnSGDkJAfCwvoaDsoMB8f-W6QAfyDoxORtN7mb6DHAzftAhpfnxVdzC-6gIT13uV0
#### Stack traceback and/or browser JavaScript console output
JavaScript console output:
```
Failed to load resource: the server responded with a status of 500 (Internal Server Error)
```
Bokeh traceback:
```
2017-07-21 00:23:58,161 Uncaught exception GET /sliders?bokeh-session-id=4RJKVrnFVe60gB5urh9sE3jUnSGDkJAfCwvoaDsoMB8f-W6QAfyDoxORtN7mb6DHAzftAhpfnxVdzC-6gIT13uV0 (::1)
HTTPServerRequest(protocol='http', host='localhost:5006', method='GET', uri='/sliders?bokeh-session-id=4RJKVrnFVe60gB5urh9sE3jUnSGDkJAfCwvoaDsoMB8f-W6QAfyDoxORtN7mb6DHAzftAhpfnxVdzC-6gIT13uV0', version='HTTP/1.1', remote_ip='::1', headers={'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6,de;q=0.4', 'Accept-Encoding': 'gzip, deflate, br', 'Host': 'localhost:5006', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/59.0.3071.109 Chrome/59.0.3071.109 Safari/537.36', 'Connection': 'keep-alive', 'Cookie': '_xsrf=2|4ab25a11|f7d3fbdf1fab4d2b01135d63518a4f9a|1498856594; username-localhost-8888="2|1:0|10:1499014969|23:username-localhost-8888|44:ODVmNmU2NjIwYjUwNDlhYzk2MzY4OWQ5NDU2ZTExYjU=|3a908d5ba83bca558deae2665732f340eeef5ce69a2763c6cef367fd892e22b7"', 'Upgrade-Insecure-Requests': '1'})
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/tornado/web.py", line 1469, in _execute
result = yield result
File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 1021, in run
yielded = self.gen.throw(*exc_info)
File "/usr/local/lib/python2.7/dist-packages/bokeh/server/views/doc_handler.py", line 27, in get
session = yield self.get_session()
File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 1015, in run
value = future.result()
File "/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py", line 237, in result
raise_exc_info(self._exc_info)
File "/usr/local/lib/python2.7/dist-packages/tornado/gen.py", line 285, in wrapper
yielded = next(result)
File "/usr/local/lib/python2.7/dist-packages/bokeh/server/views/session_handler.py", line 36, in get_session
signed=self.application.sign_sessions):
File "/usr/local/lib/python2.7/dist-packages/bokeh/util/session_id.py", line 156, in check_session_id_signature
return hmac.compare_digest(expected_signature, provided_signature)
TypeError: 'unicode' does not have the buffer interface
```
#### Screenshots or screencasts of the bug in action

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/util/session_id.py`
Content:
```
1 ''' Utilities for generating and manipulating session IDs.
2
3 A session ID would typically be associated with each browser tab viewing
4 an application or plot. Each session has its own state separate from any
5 other sessions hosted by the server.
6
7 '''
8 from __future__ import absolute_import, print_function
9
10 import base64
11 import codecs
12 import hashlib
13 import hmac
14 import random
15 import time
16
17 from six import binary_type
18
19 from bokeh.settings import settings
20
21 # Use the system PRNG for session id generation (if possible)
22 # NOTE: secure random string generation implementation is adapted
23 # from the Django project. Reference:
24 # https://github.com/django/django/blob/0ed7d155635da9f79d4dd67e4889087d3673c6da/django/utils/crypto.py
25 try:
26 random = random.SystemRandom()
27 using_sysrandom = True
28 except NotImplementedError:
29 import warnings
30 warnings.warn('A secure pseudo-random number generator is not available '
31 'on your system. Falling back to Mersenne Twister.')
32 if settings.secret_key() is None:
33 warnings.warn('A secure pseudo-random number generator is not available '
34 'and no BOKEH_SECRET_KEY has been set. '
35 'Setting a secret key will mitigate the lack of a secure '
36 'generator.')
37 using_sysrandom = False
38
39 def _ensure_bytes(secret_key):
40 if secret_key is None:
41 return None
42 elif isinstance(secret_key, binary_type):
43 return secret_key
44 else:
45 return codecs.encode(secret_key, 'utf-8')
46
47 # this is broken out for unit testability
48 def _reseed_if_needed(using_sysrandom, secret_key):
49 secret_key = _ensure_bytes(secret_key)
50 if not using_sysrandom:
51 # This is ugly, and a hack, but it makes things better than
52 # the alternative of predictability. This re-seeds the PRNG
53 # using a value that is hard for an attacker to predict, every
54 # time a random string is required. This may change the
55 # properties of the chosen random sequence slightly, but this
56 # is better than absolute predictability.
57 random.seed(
58 hashlib.sha256(
59 ("%s%s%s" % (
60 random.getstate(),
61 time.time(),
62 secret_key)).encode('utf-8')
63 ).digest())
64
65 def _base64_encode(decoded):
66 # base64 encode both takes and returns bytes, we want to work with strings.
67 # If 'decoded' isn't bytes already, assume it's utf-8
68 decoded_as_bytes = _ensure_bytes(decoded)
69 encoded = codecs.decode(base64.urlsafe_b64encode(decoded_as_bytes), 'ascii')
70 # remove padding '=' chars that cause trouble
71 return str(encoded.rstrip('='))
72
73 def _signature(base_id, secret_key):
74 secret_key = _ensure_bytes(secret_key)
75 base_id = codecs.encode(base_id, "utf-8")
76 signer = hmac.new(secret_key, base_id, hashlib.sha256)
77 return _base64_encode(signer.digest())
78
79 def _get_random_string(length=44,
80 allowed_chars='abcdefghijklmnopqrstuvwxyz'
81 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',
82 secret_key=settings.secret_key_bytes()):
83 """
84 Return a securely generated random string.
85 With the a-z, A-Z, 0-9 character set:
86 Length 12 is a 71-bit value. log_2((26+26+10)^12) =~ 71
87 Length 44 is a 261-bit value. log_2((26+26+10)^44) = 261
88 """
89 secret_key = _ensure_bytes(secret_key)
90 _reseed_if_needed(using_sysrandom, secret_key)
91 return ''.join(random.choice(allowed_chars) for i in range(length))
92
93 def generate_secret_key():
94 """
95 Generate a new securely-generated secret key appropriate
96 for SHA-256 HMAC signatures. This key could be used to
97 sign Bokeh server session IDs for example.
98 """
99 return _get_random_string()
100
101 def generate_session_id(secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()):
102 """Generate a random session ID.
103
104 Typically, each browser tab connected to a Bokeh application
105 has its own session ID. In production deployments of a Bokeh
106 app, session IDs should be random and unguessable - otherwise
107 users of the app could interfere with one another.
108
109 If session IDs are signed with a secret key, the server can
110 verify that the generator of the session ID was "authorized"
111 (the generator had to know the secret key). This can be used
112 to have a separate process, such as another web application,
113 which generates new sessions on a Bokeh server. This other
114 process may require users to log in before redirecting them to
115 the Bokeh server with a valid session ID, for example.
116
117 Args:
118 secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)
119 signed (bool, optional) : Whether to sign the session ID (default: value of
120 'BOKEH_SIGN_SESSIONS' env var)
121
122 """
123 secret_key = _ensure_bytes(secret_key)
124 if signed:
125 # note: '-' can also be in the base64 encoded signature
126 base_id = _get_random_string(secret_key=secret_key)
127 return base_id + '-' + _signature(base_id, secret_key)
128 else:
129 return _get_random_string(secret_key=secret_key)
130
131 def check_session_id_signature(session_id, secret_key=settings.secret_key_bytes(),
132 signed=settings.sign_sessions()):
133 """Check the signature of a session ID, returning True if it's valid.
134
135 The server uses this function to check whether a session ID
136 was generated with the correct secret key. If signed sessions are disabled,
137 this function always returns True.
138
139 Args:
140 session_id (str) : The session ID to check
141 secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)
142 signed (bool, optional) : Whether to check anything (default: value of
143 'BOKEH_SIGN_SESSIONS' env var)
144
145 """
146 secret_key = _ensure_bytes(secret_key)
147 if signed:
148 pieces = session_id.split('-', 1)
149 if len(pieces) != 2:
150 return False
151 base_id = pieces[0]
152 provided_signature = pieces[1]
153 expected_signature = _signature(base_id, secret_key)
154 # hmac.compare_digest() uses a string compare algorithm that doesn't
155 # short-circuit so we don't allow timing analysis
156 return hmac.compare_digest(expected_signature, provided_signature)
157 else:
158 return True
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bokeh/util/session_id.py b/bokeh/util/session_id.py
--- a/bokeh/util/session_id.py
+++ b/bokeh/util/session_id.py
@@ -17,6 +17,7 @@
from six import binary_type
from bokeh.settings import settings
+from bokeh.util.string import encode_utf8
# Use the system PRNG for session id generation (if possible)
# NOTE: secure random string generation implementation is adapted
@@ -153,6 +154,7 @@
expected_signature = _signature(base_id, secret_key)
# hmac.compare_digest() uses a string compare algorithm that doesn't
# short-circuit so we don't allow timing analysis
- return hmac.compare_digest(expected_signature, provided_signature)
+ # encode_utf8 is used to ensure that strings have same encoding
+ return hmac.compare_digest(encode_utf8(expected_signature), encode_utf8(provided_signature))
else:
return True
| {"golden_diff": "diff --git a/bokeh/util/session_id.py b/bokeh/util/session_id.py\n--- a/bokeh/util/session_id.py\n+++ b/bokeh/util/session_id.py\n@@ -17,6 +17,7 @@\n from six import binary_type\n \n from bokeh.settings import settings\n+from bokeh.util.string import encode_utf8\n \n # Use the system PRNG for session id generation (if possible)\n # NOTE: secure random string generation implementation is adapted\n@@ -153,6 +154,7 @@\n expected_signature = _signature(base_id, secret_key)\n # hmac.compare_digest() uses a string compare algorithm that doesn't\n # short-circuit so we don't allow timing analysis\n- return hmac.compare_digest(expected_signature, provided_signature)\n+ # encode_utf8 is used to ensure that strings have same encoding\n+ return hmac.compare_digest(encode_utf8(expected_signature), encode_utf8(provided_signature))\n else:\n return True\n", "issue": "TypeError: 'unicode' does not have the buffer interface\n#### ALL software version info (bokeh, python, notebook, OS, browser, any other relevant packages)\r\n\r\nbokeh 0.12.6\r\npython 2.7.12\r\n64 bit Ubuntu 16.04\r\nChrome browser\r\n\r\n#### Description of expected behavior and the observed behavior\r\n\r\nI get `500: Internal Server Error` when I try to use signed sessions with bokeh. \r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\nI use `sliders.py` example from Bokeh Gallery. I use the following to run the app and generate the session id:\r\n\r\nRun the app:\r\n```bash\r\nexport BOKEH_SECRET_KEY=KKXYhtUJmQ5f8LiQJdeUGOF4HIeTzf5zOBZmlxrXtF6D\r\nexport BOKEH_SIGN_SESSIONS=True\r\nbokeh serve sliders.py --session-ids external-signed\r\n```\r\n\r\nGenerate session id:\r\n```bash \r\nexport BOKEH_SECRET_KEY=KKXYhtUJmQ5f8LiQJdeUGOF4HIeTzf5zOBZmlxrXtF6D\r\nexport BOKEH_SIGN_SESSIONS=True\r\npython -c \"from bokeh.util.session_id import generate_session_id; print(generate_session_id())\"\r\n```\r\n\r\nExample url to access app:\r\nhttp://localhost:5006/sliders?bokeh-session-id=4RJKVrnFVe60gB5urh9sE3jUnSGDkJAfCwvoaDsoMB8f-W6QAfyDoxORtN7mb6DHAzftAhpfnxVdzC-6gIT13uV0\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\n\r\nJavaScript console output:\r\n```\r\nFailed to load resource: the server responded with a status of 500 (Internal Server Error)\r\n```\r\n\r\nBokeh traceback:\r\n```\r\n2017-07-21 00:23:58,161 Uncaught exception GET /sliders?bokeh-session-id=4RJKVrnFVe60gB5urh9sE3jUnSGDkJAfCwvoaDsoMB8f-W6QAfyDoxORtN7mb6DHAzftAhpfnxVdzC-6gIT13uV0 (::1)\r\nHTTPServerRequest(protocol='http', host='localhost:5006', method='GET', uri='/sliders?bokeh-session-id=4RJKVrnFVe60gB5urh9sE3jUnSGDkJAfCwvoaDsoMB8f-W6QAfyDoxORtN7mb6DHAzftAhpfnxVdzC-6gIT13uV0', version='HTTP/1.1', remote_ip='::1', headers={'Accept-Language': 'en-US,en;q=0.8,ru;q=0.6,de;q=0.4', 'Accept-Encoding': 'gzip, deflate, br', 'Host': 'localhost:5006', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/59.0.3071.109 Chrome/59.0.3071.109 Safari/537.36', 'Connection': 'keep-alive', 'Cookie': '_xsrf=2|4ab25a11|f7d3fbdf1fab4d2b01135d63518a4f9a|1498856594; username-localhost-8888=\"2|1:0|10:1499014969|23:username-localhost-8888|44:ODVmNmU2NjIwYjUwNDlhYzk2MzY4OWQ5NDU2ZTExYjU=|3a908d5ba83bca558deae2665732f340eeef5ce69a2763c6cef367fd892e22b7\"', 'Upgrade-Insecure-Requests': '1'})\r\nTraceback (most recent call last):\r\n File 
\"/usr/local/lib/python2.7/dist-packages/tornado/web.py\", line 1469, in _execute\r\n result = yield result\r\n File \"/usr/local/lib/python2.7/dist-packages/tornado/gen.py\", line 1015, in run\r\n value = future.result()\r\n File \"/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py\", line 237, in result\r\n raise_exc_info(self._exc_info)\r\n File \"/usr/local/lib/python2.7/dist-packages/tornado/gen.py\", line 1021, in run\r\n yielded = self.gen.throw(*exc_info)\r\n File \"/usr/local/lib/python2.7/dist-packages/bokeh/server/views/doc_handler.py\", line 27, in get\r\n session = yield self.get_session()\r\n File \"/usr/local/lib/python2.7/dist-packages/tornado/gen.py\", line 1015, in run\r\n value = future.result()\r\n File \"/usr/local/lib/python2.7/dist-packages/tornado/concurrent.py\", line 237, in result\r\n raise_exc_info(self._exc_info)\r\n File \"/usr/local/lib/python2.7/dist-packages/tornado/gen.py\", line 285, in wrapper\r\n yielded = next(result)\r\n File \"/usr/local/lib/python2.7/dist-packages/bokeh/server/views/session_handler.py\", line 36, in get_session\r\n signed=self.application.sign_sessions):\r\n File \"/usr/local/lib/python2.7/dist-packages/bokeh/util/session_id.py\", line 156, in check_session_id_signature\r\n return hmac.compare_digest(expected_signature, provided_signature)\r\nTypeError: 'unicode' does not have the buffer interface\r\n```\r\n\r\n#### Screenshots or screencasts of the bug in action\r\n\r\n\r\n\n", "before_files": [{"content": "''' Utilities for generating and manipulating session IDs.\n\nA session ID would typically be associated with each browser tab viewing\nan application or plot. Each session has its own state separate from any\nother sessions hosted by the server.\n\n'''\nfrom __future__ import absolute_import, print_function\n\nimport base64\nimport codecs\nimport hashlib\nimport hmac\nimport random\nimport time\n\nfrom six import binary_type\n\nfrom bokeh.settings import settings\n\n# Use the system PRNG for session id generation (if possible)\n# NOTE: secure random string generation implementation is adapted\n# from the Django project. Reference:\n# https://github.com/django/django/blob/0ed7d155635da9f79d4dd67e4889087d3673c6da/django/utils/crypto.py\ntry:\n random = random.SystemRandom()\n using_sysrandom = True\nexcept NotImplementedError:\n import warnings\n warnings.warn('A secure pseudo-random number generator is not available '\n 'on your system. Falling back to Mersenne Twister.')\n if settings.secret_key() is None:\n warnings.warn('A secure pseudo-random number generator is not available '\n 'and no BOKEH_SECRET_KEY has been set. '\n 'Setting a secret key will mitigate the lack of a secure '\n 'generator.')\n using_sysrandom = False\n\ndef _ensure_bytes(secret_key):\n if secret_key is None:\n return None\n elif isinstance(secret_key, binary_type):\n return secret_key\n else:\n return codecs.encode(secret_key, 'utf-8')\n\n# this is broken out for unit testability\ndef _reseed_if_needed(using_sysrandom, secret_key):\n secret_key = _ensure_bytes(secret_key)\n if not using_sysrandom:\n # This is ugly, and a hack, but it makes things better than\n # the alternative of predictability. This re-seeds the PRNG\n # using a value that is hard for an attacker to predict, every\n # time a random string is required. 
This may change the\n # properties of the chosen random sequence slightly, but this\n # is better than absolute predictability.\n random.seed(\n hashlib.sha256(\n (\"%s%s%s\" % (\n random.getstate(),\n time.time(),\n secret_key)).encode('utf-8')\n ).digest())\n\ndef _base64_encode(decoded):\n # base64 encode both takes and returns bytes, we want to work with strings.\n # If 'decoded' isn't bytes already, assume it's utf-8\n decoded_as_bytes = _ensure_bytes(decoded)\n encoded = codecs.decode(base64.urlsafe_b64encode(decoded_as_bytes), 'ascii')\n # remove padding '=' chars that cause trouble\n return str(encoded.rstrip('='))\n\ndef _signature(base_id, secret_key):\n secret_key = _ensure_bytes(secret_key)\n base_id = codecs.encode(base_id, \"utf-8\")\n signer = hmac.new(secret_key, base_id, hashlib.sha256)\n return _base64_encode(signer.digest())\n\ndef _get_random_string(length=44,\n allowed_chars='abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',\n secret_key=settings.secret_key_bytes()):\n \"\"\"\n Return a securely generated random string.\n With the a-z, A-Z, 0-9 character set:\n Length 12 is a 71-bit value. log_2((26+26+10)^12) =~ 71\n Length 44 is a 261-bit value. log_2((26+26+10)^44) = 261\n \"\"\"\n secret_key = _ensure_bytes(secret_key)\n _reseed_if_needed(using_sysrandom, secret_key)\n return ''.join(random.choice(allowed_chars) for i in range(length))\n\ndef generate_secret_key():\n \"\"\"\n Generate a new securely-generated secret key appropriate\n for SHA-256 HMAC signatures. This key could be used to\n sign Bokeh server session IDs for example.\n \"\"\"\n return _get_random_string()\n\ndef generate_session_id(secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()):\n \"\"\"Generate a random session ID.\n\n Typically, each browser tab connected to a Bokeh application\n has its own session ID. In production deployments of a Bokeh\n app, session IDs should be random and unguessable - otherwise\n users of the app could interfere with one another.\n\n If session IDs are signed with a secret key, the server can\n verify that the generator of the session ID was \"authorized\"\n (the generator had to know the secret key). This can be used\n to have a separate process, such as another web application,\n which generates new sessions on a Bokeh server. This other\n process may require users to log in before redirecting them to\n the Bokeh server with a valid session ID, for example.\n\n Args:\n secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)\n signed (bool, optional) : Whether to sign the session ID (default: value of\n 'BOKEH_SIGN_SESSIONS' env var)\n\n \"\"\"\n secret_key = _ensure_bytes(secret_key)\n if signed:\n # note: '-' can also be in the base64 encoded signature\n base_id = _get_random_string(secret_key=secret_key)\n return base_id + '-' + _signature(base_id, secret_key)\n else:\n return _get_random_string(secret_key=secret_key)\n\ndef check_session_id_signature(session_id, secret_key=settings.secret_key_bytes(),\n signed=settings.sign_sessions()):\n \"\"\"Check the signature of a session ID, returning True if it's valid.\n\n The server uses this function to check whether a session ID\n was generated with the correct secret key. 
If signed sessions are disabled,\n this function always returns True.\n\n Args:\n session_id (str) : The session ID to check\n secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)\n signed (bool, optional) : Whether to check anything (default: value of\n 'BOKEH_SIGN_SESSIONS' env var)\n\n \"\"\"\n secret_key = _ensure_bytes(secret_key)\n if signed:\n pieces = session_id.split('-', 1)\n if len(pieces) != 2:\n return False\n base_id = pieces[0]\n provided_signature = pieces[1]\n expected_signature = _signature(base_id, secret_key)\n # hmac.compare_digest() uses a string compare algorithm that doesn't\n # short-circuit so we don't allow timing analysis\n return hmac.compare_digest(expected_signature, provided_signature)\n else:\n return True\n", "path": "bokeh/util/session_id.py"}], "after_files": [{"content": "''' Utilities for generating and manipulating session IDs.\n\nA session ID would typically be associated with each browser tab viewing\nan application or plot. Each session has its own state separate from any\nother sessions hosted by the server.\n\n'''\nfrom __future__ import absolute_import, print_function\n\nimport base64\nimport codecs\nimport hashlib\nimport hmac\nimport random\nimport time\n\nfrom six import binary_type\n\nfrom bokeh.settings import settings\nfrom bokeh.util.string import encode_utf8\n\n# Use the system PRNG for session id generation (if possible)\n# NOTE: secure random string generation implementation is adapted\n# from the Django project. Reference:\n# https://github.com/django/django/blob/0ed7d155635da9f79d4dd67e4889087d3673c6da/django/utils/crypto.py\ntry:\n random = random.SystemRandom()\n using_sysrandom = True\nexcept NotImplementedError:\n import warnings\n warnings.warn('A secure pseudo-random number generator is not available '\n 'on your system. Falling back to Mersenne Twister.')\n if settings.secret_key() is None:\n warnings.warn('A secure pseudo-random number generator is not available '\n 'and no BOKEH_SECRET_KEY has been set. '\n 'Setting a secret key will mitigate the lack of a secure '\n 'generator.')\n using_sysrandom = False\n\ndef _ensure_bytes(secret_key):\n if secret_key is None:\n return None\n elif isinstance(secret_key, binary_type):\n return secret_key\n else:\n return codecs.encode(secret_key, 'utf-8')\n\n# this is broken out for unit testability\ndef _reseed_if_needed(using_sysrandom, secret_key):\n secret_key = _ensure_bytes(secret_key)\n if not using_sysrandom:\n # This is ugly, and a hack, but it makes things better than\n # the alternative of predictability. This re-seeds the PRNG\n # using a value that is hard for an attacker to predict, every\n # time a random string is required. 
This may change the\n # properties of the chosen random sequence slightly, but this\n # is better than absolute predictability.\n random.seed(\n hashlib.sha256(\n (\"%s%s%s\" % (\n random.getstate(),\n time.time(),\n secret_key)).encode('utf-8')\n ).digest())\n\ndef _base64_encode(decoded):\n # base64 encode both takes and returns bytes, we want to work with strings.\n # If 'decoded' isn't bytes already, assume it's utf-8\n decoded_as_bytes = _ensure_bytes(decoded)\n encoded = codecs.decode(base64.urlsafe_b64encode(decoded_as_bytes), 'ascii')\n # remove padding '=' chars that cause trouble\n return str(encoded.rstrip('='))\n\ndef _signature(base_id, secret_key):\n secret_key = _ensure_bytes(secret_key)\n base_id = codecs.encode(base_id, \"utf-8\")\n signer = hmac.new(secret_key, base_id, hashlib.sha256)\n return _base64_encode(signer.digest())\n\ndef _get_random_string(length=44,\n allowed_chars='abcdefghijklmnopqrstuvwxyz'\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789',\n secret_key=settings.secret_key_bytes()):\n \"\"\"\n Return a securely generated random string.\n With the a-z, A-Z, 0-9 character set:\n Length 12 is a 71-bit value. log_2((26+26+10)^12) =~ 71\n Length 44 is a 261-bit value. log_2((26+26+10)^44) = 261\n \"\"\"\n secret_key = _ensure_bytes(secret_key)\n _reseed_if_needed(using_sysrandom, secret_key)\n return ''.join(random.choice(allowed_chars) for i in range(length))\n\ndef generate_secret_key():\n \"\"\"\n Generate a new securely-generated secret key appropriate\n for SHA-256 HMAC signatures. This key could be used to\n sign Bokeh server session IDs for example.\n \"\"\"\n return _get_random_string()\n\ndef generate_session_id(secret_key=settings.secret_key_bytes(), signed=settings.sign_sessions()):\n \"\"\"Generate a random session ID.\n\n Typically, each browser tab connected to a Bokeh application\n has its own session ID. In production deployments of a Bokeh\n app, session IDs should be random and unguessable - otherwise\n users of the app could interfere with one another.\n\n If session IDs are signed with a secret key, the server can\n verify that the generator of the session ID was \"authorized\"\n (the generator had to know the secret key). This can be used\n to have a separate process, such as another web application,\n which generates new sessions on a Bokeh server. This other\n process may require users to log in before redirecting them to\n the Bokeh server with a valid session ID, for example.\n\n Args:\n secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)\n signed (bool, optional) : Whether to sign the session ID (default: value of\n 'BOKEH_SIGN_SESSIONS' env var)\n\n \"\"\"\n secret_key = _ensure_bytes(secret_key)\n if signed:\n # note: '-' can also be in the base64 encoded signature\n base_id = _get_random_string(secret_key=secret_key)\n return base_id + '-' + _signature(base_id, secret_key)\n else:\n return _get_random_string(secret_key=secret_key)\n\ndef check_session_id_signature(session_id, secret_key=settings.secret_key_bytes(),\n signed=settings.sign_sessions()):\n \"\"\"Check the signature of a session ID, returning True if it's valid.\n\n The server uses this function to check whether a session ID\n was generated with the correct secret key. 
If signed sessions are disabled,\n this function always returns True.\n\n Args:\n session_id (str) : The session ID to check\n secret_key (str, optional) : Secret key (default: value of 'BOKEH_SECRET_KEY' env var)\n signed (bool, optional) : Whether to check anything (default: value of\n 'BOKEH_SIGN_SESSIONS' env var)\n\n \"\"\"\n secret_key = _ensure_bytes(secret_key)\n if signed:\n pieces = session_id.split('-', 1)\n if len(pieces) != 2:\n return False\n base_id = pieces[0]\n provided_signature = pieces[1]\n expected_signature = _signature(base_id, secret_key)\n # hmac.compare_digest() uses a string compare algorithm that doesn't\n # short-circuit so we don't allow timing analysis\n # encode_utf8 is used to ensure that strings have same encoding\n return hmac.compare_digest(encode_utf8(expected_signature), encode_utf8(provided_signature))\n else:\n return True\n", "path": "bokeh/util/session_id.py"}]} | 3,575 | 210 |
gh_patches_debug_25736 | rasdani/github-patches | git_diff | iterative__dvc-6240 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
import: error message when imported file does not exist
## Description
When you import a file from a exernal Git repository but you specify a wrong path you receive a confusing error message.
### Reproduce
```
dvc import [email protected]:iterative/example-get-started data/fake.xml
```
### Expected
A concise error that informs that data/fake.xml does not exist.
### Environment information
```
DVC version: 2.3.0 (pip)
---------------------------------
Platform: Python 3.7.9 on Linux-4.15.0-96-generic-x86_64-with-debian-buster-sid
Supports: http, https
Cache types: symlink
Cache directory: nfs on 10.10.30.245:/NAS/VA
Caches: local
Remotes: None
Workspace directory: ext4 on /dev/sdb1
Repo: dvc, git
```
I would like to help fix this problem, I thought about adding some regression test for https://github.com/iterative/dvc/blob/master/dvc/repo/imp_url.py similar to https://github.com/iterative/dvc/blob/master/tests/func/test_import_url.py#L107 and catch the exception later to return a proper message
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/dependency/repo.py`
Content:
```
1 import os
2 from collections import defaultdict
3 from typing import TYPE_CHECKING, Dict, Optional, Set
4
5 from voluptuous import Required
6
7 from dvc.path_info import PathInfo
8
9 from .base import Dependency
10
11 if TYPE_CHECKING:
12 from dvc.objects.db.base import ObjectDB
13 from dvc.objects.file import HashFile
14
15
16 class RepoDependency(Dependency):
17 PARAM_REPO = "repo"
18 PARAM_URL = "url"
19 PARAM_REV = "rev"
20 PARAM_REV_LOCK = "rev_lock"
21
22 REPO_SCHEMA = {
23 PARAM_REPO: {
24 Required(PARAM_URL): str,
25 PARAM_REV: str,
26 PARAM_REV_LOCK: str,
27 }
28 }
29
30 def __init__(self, def_repo, stage, *args, **kwargs):
31 self.def_repo = def_repo
32 self._staged_objs: Dict[str, "HashFile"] = {}
33 super().__init__(stage, *args, **kwargs)
34
35 def _parse_path(self, fs, path_info):
36 return None
37
38 @property
39 def is_in_repo(self):
40 return False
41
42 def __str__(self):
43 return "{} ({})".format(self.def_path, self.def_repo[self.PARAM_URL])
44
45 def workspace_status(self):
46 current = self.get_obj(locked=True).hash_info
47 updated = self.get_obj(locked=False).hash_info
48
49 if current != updated:
50 return {str(self): "update available"}
51
52 return {}
53
54 def status(self):
55 return self.workspace_status()
56
57 def save(self):
58 pass
59
60 def dumpd(self):
61 return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}
62
63 def download(self, to, jobs=None):
64 from dvc.checkout import checkout
65 from dvc.objects import save
66 from dvc.objects.db.git import GitObjectDB
67 from dvc.repo.fetch import fetch_from_odb
68
69 for odb, objs in self.get_used_objs().items():
70 if not isinstance(odb, GitObjectDB):
71 fetch_from_odb(self.repo, odb, objs, jobs=jobs)
72
73 obj = self.get_obj()
74 save(self.repo.odb.local, obj, jobs=jobs)
75 checkout(
76 to.path_info,
77 to.fs,
78 obj,
79 self.repo.odb.local,
80 dvcignore=None,
81 state=self.repo.state,
82 )
83
84 def update(self, rev=None):
85 if rev:
86 self.def_repo[self.PARAM_REV] = rev
87 with self._make_repo(locked=False) as repo:
88 self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()
89
90 def changed_checksum(self):
91 # From current repo point of view what describes RepoDependency is its
92 # origin project url and rev_lock, and it makes RepoDependency
93 # immutable, hence its impossible for checksum to change.
94 return False
95
96 def get_used_objs(
97 self, **kwargs
98 ) -> Dict[Optional["ObjectDB"], Set["HashFile"]]:
99 from dvc.config import NoRemoteError
100 from dvc.exceptions import NoOutputOrStageError
101 from dvc.objects.db.git import GitObjectDB
102 from dvc.objects.stage import stage
103
104 local_odb = self.repo.odb.local
105 locked = kwargs.pop("locked", True)
106 with self._make_repo(
107 locked=locked, cache_dir=local_odb.cache_dir
108 ) as repo:
109 used_objs = defaultdict(set)
110 rev = repo.get_rev()
111 if locked and self.def_repo.get(self.PARAM_REV_LOCK) is None:
112 self.def_repo[self.PARAM_REV_LOCK] = rev
113
114 path_info = PathInfo(repo.root_dir) / str(self.def_path)
115 try:
116 for odb, objs in repo.used_objs(
117 [os.fspath(path_info)],
118 force=True,
119 jobs=kwargs.get("jobs"),
120 recursive=True,
121 ).items():
122 if odb is None:
123 odb = repo.cloud.get_remote().odb
124 self._check_circular_import(odb)
125 used_objs[odb].update(objs)
126 except (NoRemoteError, NoOutputOrStageError):
127 pass
128
129 staged_obj = stage(
130 local_odb,
131 path_info,
132 repo.repo_fs,
133 local_odb.fs.PARAM_CHECKSUM,
134 )
135 self._staged_objs[rev] = staged_obj
136 git_odb = GitObjectDB(repo.repo_fs, repo.root_dir)
137 used_objs[git_odb].add(staged_obj)
138 return used_objs
139
140 def _check_circular_import(self, odb):
141 from dvc.exceptions import CircularImportError
142 from dvc.fs.repo import RepoFileSystem
143
144 if not odb or not isinstance(odb.fs, RepoFileSystem):
145 return
146
147 self_url = self.repo.url or self.repo.root_dir
148 if odb.fs.repo_url is not None and odb.fs.repo_url == self_url:
149 raise CircularImportError(self, odb.fs.repo_url, self_url)
150
151 def get_obj(self, filter_info=None, **kwargs):
152 from dvc.objects.stage import stage
153
154 odb = self.repo.odb.local
155 locked = kwargs.pop("locked", True)
156 with self._make_repo(locked=locked, cache_dir=odb.cache_dir) as repo:
157 rev = repo.get_rev()
158 if locked and self.def_repo.get(self.PARAM_REV_LOCK) is None:
159 self.def_repo[self.PARAM_REV_LOCK] = rev
160 obj = self._staged_objs.get(rev)
161 if obj is not None:
162 return obj
163
164 path_info = PathInfo(repo.root_dir) / str(self.def_path)
165 obj = stage(
166 odb,
167 path_info,
168 repo.repo_fs,
169 odb.fs.PARAM_CHECKSUM,
170 )
171 self._staged_objs[rev] = obj
172 return obj
173
174 def _make_repo(self, locked=True, **kwargs):
175 from dvc.external_repo import external_repo
176
177 d = self.def_repo
178 rev = self._get_rev(locked=locked)
179 return external_repo(d[self.PARAM_URL], rev=rev, **kwargs)
180
181 def _get_rev(self, locked=True):
182 d = self.def_repo
183 return (d.get(self.PARAM_REV_LOCK) if locked else None) or d.get(
184 self.PARAM_REV
185 )
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/dependency/repo.py b/dvc/dependency/repo.py
--- a/dvc/dependency/repo.py
+++ b/dvc/dependency/repo.py
@@ -97,7 +97,7 @@
self, **kwargs
) -> Dict[Optional["ObjectDB"], Set["HashFile"]]:
from dvc.config import NoRemoteError
- from dvc.exceptions import NoOutputOrStageError
+ from dvc.exceptions import NoOutputOrStageError, PathMissingError
from dvc.objects.db.git import GitObjectDB
from dvc.objects.stage import stage
@@ -126,12 +126,18 @@
except (NoRemoteError, NoOutputOrStageError):
pass
- staged_obj = stage(
- local_odb,
- path_info,
- repo.repo_fs,
- local_odb.fs.PARAM_CHECKSUM,
- )
+ try:
+ staged_obj = stage(
+ local_odb,
+ path_info,
+ repo.repo_fs,
+ local_odb.fs.PARAM_CHECKSUM,
+ )
+ except FileNotFoundError as exc:
+ raise PathMissingError(
+ self.def_path, self.def_repo[self.PARAM_URL]
+ ) from exc
+
self._staged_objs[rev] = staged_obj
git_odb = GitObjectDB(repo.repo_fs, repo.root_dir)
used_objs[git_odb].add(staged_obj)
| {"golden_diff": "diff --git a/dvc/dependency/repo.py b/dvc/dependency/repo.py\n--- a/dvc/dependency/repo.py\n+++ b/dvc/dependency/repo.py\n@@ -97,7 +97,7 @@\n self, **kwargs\n ) -> Dict[Optional[\"ObjectDB\"], Set[\"HashFile\"]]:\n from dvc.config import NoRemoteError\n- from dvc.exceptions import NoOutputOrStageError\n+ from dvc.exceptions import NoOutputOrStageError, PathMissingError\n from dvc.objects.db.git import GitObjectDB\n from dvc.objects.stage import stage\n \n@@ -126,12 +126,18 @@\n except (NoRemoteError, NoOutputOrStageError):\n pass\n \n- staged_obj = stage(\n- local_odb,\n- path_info,\n- repo.repo_fs,\n- local_odb.fs.PARAM_CHECKSUM,\n- )\n+ try:\n+ staged_obj = stage(\n+ local_odb,\n+ path_info,\n+ repo.repo_fs,\n+ local_odb.fs.PARAM_CHECKSUM,\n+ )\n+ except FileNotFoundError as exc:\n+ raise PathMissingError(\n+ self.def_path, self.def_repo[self.PARAM_URL]\n+ ) from exc\n+\n self._staged_objs[rev] = staged_obj\n git_odb = GitObjectDB(repo.repo_fs, repo.root_dir)\n used_objs[git_odb].add(staged_obj)\n", "issue": "import: error message when imported file does not exist\n## Description\r\n\r\nWhen you import a file from a exernal Git repository but you specify a wrong path you receive a confusing error message.\r\n\r\n### Reproduce\r\n```\r\ndvc import [email protected]:iterative/example-get-started data/fake.xml\r\n```\r\n\r\n### Expected\r\n\r\nA concise error that informs that data/fake.xml does not exist.\r\n\r\n### Environment information\r\n\r\n```\r\nDVC version: 2.3.0 (pip)\r\n---------------------------------\r\nPlatform: Python 3.7.9 on Linux-4.15.0-96-generic-x86_64-with-debian-buster-sid\r\nSupports: http, https\r\nCache types: symlink\r\nCache directory: nfs on 10.10.30.245:/NAS/VA\r\nCaches: local\r\nRemotes: None\r\nWorkspace directory: ext4 on /dev/sdb1\r\nRepo: dvc, git\r\n```\r\n\r\nI would like to help fix this problem, I thought about adding some regression test for https://github.com/iterative/dvc/blob/master/dvc/repo/imp_url.py similar to https://github.com/iterative/dvc/blob/master/tests/func/test_import_url.py#L107 and catch the exception later to return a proper message\n", "before_files": [{"content": "import os\nfrom collections import defaultdict\nfrom typing import TYPE_CHECKING, Dict, Optional, Set\n\nfrom voluptuous import Required\n\nfrom dvc.path_info import PathInfo\n\nfrom .base import Dependency\n\nif TYPE_CHECKING:\n from dvc.objects.db.base import ObjectDB\n from dvc.objects.file import HashFile\n\n\nclass RepoDependency(Dependency):\n PARAM_REPO = \"repo\"\n PARAM_URL = \"url\"\n PARAM_REV = \"rev\"\n PARAM_REV_LOCK = \"rev_lock\"\n\n REPO_SCHEMA = {\n PARAM_REPO: {\n Required(PARAM_URL): str,\n PARAM_REV: str,\n PARAM_REV_LOCK: str,\n }\n }\n\n def __init__(self, def_repo, stage, *args, **kwargs):\n self.def_repo = def_repo\n self._staged_objs: Dict[str, \"HashFile\"] = {}\n super().__init__(stage, *args, **kwargs)\n\n def _parse_path(self, fs, path_info):\n return None\n\n @property\n def is_in_repo(self):\n return False\n\n def __str__(self):\n return \"{} ({})\".format(self.def_path, self.def_repo[self.PARAM_URL])\n\n def workspace_status(self):\n current = self.get_obj(locked=True).hash_info\n updated = self.get_obj(locked=False).hash_info\n\n if current != updated:\n return {str(self): \"update available\"}\n\n return {}\n\n def status(self):\n return self.workspace_status()\n\n def save(self):\n pass\n\n def dumpd(self):\n return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}\n\n def download(self, to, 
jobs=None):\n from dvc.checkout import checkout\n from dvc.objects import save\n from dvc.objects.db.git import GitObjectDB\n from dvc.repo.fetch import fetch_from_odb\n\n for odb, objs in self.get_used_objs().items():\n if not isinstance(odb, GitObjectDB):\n fetch_from_odb(self.repo, odb, objs, jobs=jobs)\n\n obj = self.get_obj()\n save(self.repo.odb.local, obj, jobs=jobs)\n checkout(\n to.path_info,\n to.fs,\n obj,\n self.repo.odb.local,\n dvcignore=None,\n state=self.repo.state,\n )\n\n def update(self, rev=None):\n if rev:\n self.def_repo[self.PARAM_REV] = rev\n with self._make_repo(locked=False) as repo:\n self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()\n\n def changed_checksum(self):\n # From current repo point of view what describes RepoDependency is its\n # origin project url and rev_lock, and it makes RepoDependency\n # immutable, hence its impossible for checksum to change.\n return False\n\n def get_used_objs(\n self, **kwargs\n ) -> Dict[Optional[\"ObjectDB\"], Set[\"HashFile\"]]:\n from dvc.config import NoRemoteError\n from dvc.exceptions import NoOutputOrStageError\n from dvc.objects.db.git import GitObjectDB\n from dvc.objects.stage import stage\n\n local_odb = self.repo.odb.local\n locked = kwargs.pop(\"locked\", True)\n with self._make_repo(\n locked=locked, cache_dir=local_odb.cache_dir\n ) as repo:\n used_objs = defaultdict(set)\n rev = repo.get_rev()\n if locked and self.def_repo.get(self.PARAM_REV_LOCK) is None:\n self.def_repo[self.PARAM_REV_LOCK] = rev\n\n path_info = PathInfo(repo.root_dir) / str(self.def_path)\n try:\n for odb, objs in repo.used_objs(\n [os.fspath(path_info)],\n force=True,\n jobs=kwargs.get(\"jobs\"),\n recursive=True,\n ).items():\n if odb is None:\n odb = repo.cloud.get_remote().odb\n self._check_circular_import(odb)\n used_objs[odb].update(objs)\n except (NoRemoteError, NoOutputOrStageError):\n pass\n\n staged_obj = stage(\n local_odb,\n path_info,\n repo.repo_fs,\n local_odb.fs.PARAM_CHECKSUM,\n )\n self._staged_objs[rev] = staged_obj\n git_odb = GitObjectDB(repo.repo_fs, repo.root_dir)\n used_objs[git_odb].add(staged_obj)\n return used_objs\n\n def _check_circular_import(self, odb):\n from dvc.exceptions import CircularImportError\n from dvc.fs.repo import RepoFileSystem\n\n if not odb or not isinstance(odb.fs, RepoFileSystem):\n return\n\n self_url = self.repo.url or self.repo.root_dir\n if odb.fs.repo_url is not None and odb.fs.repo_url == self_url:\n raise CircularImportError(self, odb.fs.repo_url, self_url)\n\n def get_obj(self, filter_info=None, **kwargs):\n from dvc.objects.stage import stage\n\n odb = self.repo.odb.local\n locked = kwargs.pop(\"locked\", True)\n with self._make_repo(locked=locked, cache_dir=odb.cache_dir) as repo:\n rev = repo.get_rev()\n if locked and self.def_repo.get(self.PARAM_REV_LOCK) is None:\n self.def_repo[self.PARAM_REV_LOCK] = rev\n obj = self._staged_objs.get(rev)\n if obj is not None:\n return obj\n\n path_info = PathInfo(repo.root_dir) / str(self.def_path)\n obj = stage(\n odb,\n path_info,\n repo.repo_fs,\n odb.fs.PARAM_CHECKSUM,\n )\n self._staged_objs[rev] = obj\n return obj\n\n def _make_repo(self, locked=True, **kwargs):\n from dvc.external_repo import external_repo\n\n d = self.def_repo\n rev = self._get_rev(locked=locked)\n return external_repo(d[self.PARAM_URL], rev=rev, **kwargs)\n\n def _get_rev(self, locked=True):\n d = self.def_repo\n return (d.get(self.PARAM_REV_LOCK) if locked else None) or d.get(\n self.PARAM_REV\n )\n", "path": "dvc/dependency/repo.py"}], "after_files": [{"content": 
"import os\nfrom collections import defaultdict\nfrom typing import TYPE_CHECKING, Dict, Optional, Set\n\nfrom voluptuous import Required\n\nfrom dvc.path_info import PathInfo\n\nfrom .base import Dependency\n\nif TYPE_CHECKING:\n from dvc.objects.db.base import ObjectDB\n from dvc.objects.file import HashFile\n\n\nclass RepoDependency(Dependency):\n PARAM_REPO = \"repo\"\n PARAM_URL = \"url\"\n PARAM_REV = \"rev\"\n PARAM_REV_LOCK = \"rev_lock\"\n\n REPO_SCHEMA = {\n PARAM_REPO: {\n Required(PARAM_URL): str,\n PARAM_REV: str,\n PARAM_REV_LOCK: str,\n }\n }\n\n def __init__(self, def_repo, stage, *args, **kwargs):\n self.def_repo = def_repo\n self._staged_objs: Dict[str, \"HashFile\"] = {}\n super().__init__(stage, *args, **kwargs)\n\n def _parse_path(self, fs, path_info):\n return None\n\n @property\n def is_in_repo(self):\n return False\n\n def __str__(self):\n return \"{} ({})\".format(self.def_path, self.def_repo[self.PARAM_URL])\n\n def workspace_status(self):\n current = self.get_obj(locked=True).hash_info\n updated = self.get_obj(locked=False).hash_info\n\n if current != updated:\n return {str(self): \"update available\"}\n\n return {}\n\n def status(self):\n return self.workspace_status()\n\n def save(self):\n pass\n\n def dumpd(self):\n return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}\n\n def download(self, to, jobs=None):\n from dvc.checkout import checkout\n from dvc.objects import save\n from dvc.objects.db.git import GitObjectDB\n from dvc.repo.fetch import fetch_from_odb\n\n for odb, objs in self.get_used_objs().items():\n if not isinstance(odb, GitObjectDB):\n fetch_from_odb(self.repo, odb, objs, jobs=jobs)\n\n obj = self.get_obj()\n save(self.repo.odb.local, obj, jobs=jobs)\n checkout(\n to.path_info,\n to.fs,\n obj,\n self.repo.odb.local,\n dvcignore=None,\n state=self.repo.state,\n )\n\n def update(self, rev=None):\n if rev:\n self.def_repo[self.PARAM_REV] = rev\n with self._make_repo(locked=False) as repo:\n self.def_repo[self.PARAM_REV_LOCK] = repo.get_rev()\n\n def changed_checksum(self):\n # From current repo point of view what describes RepoDependency is its\n # origin project url and rev_lock, and it makes RepoDependency\n # immutable, hence its impossible for checksum to change.\n return False\n\n def get_used_objs(\n self, **kwargs\n ) -> Dict[Optional[\"ObjectDB\"], Set[\"HashFile\"]]:\n from dvc.config import NoRemoteError\n from dvc.exceptions import NoOutputOrStageError, PathMissingError\n from dvc.objects.db.git import GitObjectDB\n from dvc.objects.stage import stage\n\n local_odb = self.repo.odb.local\n locked = kwargs.pop(\"locked\", True)\n with self._make_repo(\n locked=locked, cache_dir=local_odb.cache_dir\n ) as repo:\n used_objs = defaultdict(set)\n rev = repo.get_rev()\n if locked and self.def_repo.get(self.PARAM_REV_LOCK) is None:\n self.def_repo[self.PARAM_REV_LOCK] = rev\n\n path_info = PathInfo(repo.root_dir) / str(self.def_path)\n try:\n for odb, objs in repo.used_objs(\n [os.fspath(path_info)],\n force=True,\n jobs=kwargs.get(\"jobs\"),\n recursive=True,\n ).items():\n if odb is None:\n odb = repo.cloud.get_remote().odb\n self._check_circular_import(odb)\n used_objs[odb].update(objs)\n except (NoRemoteError, NoOutputOrStageError):\n pass\n\n try:\n staged_obj = stage(\n local_odb,\n path_info,\n repo.repo_fs,\n local_odb.fs.PARAM_CHECKSUM,\n )\n except FileNotFoundError as exc:\n raise PathMissingError(\n self.def_path, self.def_repo[self.PARAM_URL]\n ) from exc\n\n self._staged_objs[rev] = staged_obj\n git_odb = 
GitObjectDB(repo.repo_fs, repo.root_dir)\n used_objs[git_odb].add(staged_obj)\n return used_objs\n\n def _check_circular_import(self, odb):\n from dvc.exceptions import CircularImportError\n from dvc.fs.repo import RepoFileSystem\n\n if not odb or not isinstance(odb.fs, RepoFileSystem):\n return\n\n self_url = self.repo.url or self.repo.root_dir\n if odb.fs.repo_url is not None and odb.fs.repo_url == self_url:\n raise CircularImportError(self, odb.fs.repo_url, self_url)\n\n def get_obj(self, filter_info=None, **kwargs):\n from dvc.objects.stage import stage\n\n odb = self.repo.odb.local\n locked = kwargs.pop(\"locked\", True)\n with self._make_repo(locked=locked, cache_dir=odb.cache_dir) as repo:\n rev = repo.get_rev()\n if locked and self.def_repo.get(self.PARAM_REV_LOCK) is None:\n self.def_repo[self.PARAM_REV_LOCK] = rev\n obj = self._staged_objs.get(rev)\n if obj is not None:\n return obj\n\n path_info = PathInfo(repo.root_dir) / str(self.def_path)\n obj = stage(\n odb,\n path_info,\n repo.repo_fs,\n odb.fs.PARAM_CHECKSUM,\n )\n self._staged_objs[rev] = obj\n return obj\n\n def _make_repo(self, locked=True, **kwargs):\n from dvc.external_repo import external_repo\n\n d = self.def_repo\n rev = self._get_rev(locked=locked)\n return external_repo(d[self.PARAM_URL], rev=rev, **kwargs)\n\n def _get_rev(self, locked=True):\n d = self.def_repo\n return (d.get(self.PARAM_REV_LOCK) if locked else None) or d.get(\n self.PARAM_REV\n )\n", "path": "dvc/dependency/repo.py"}]} | 2,351 | 321 |
gh_patches_debug_4100 | rasdani/github-patches | git_diff | Parsl__parsl-2115 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
grid engine provider hangs when qstat output is large
**Describe the bug**
From Quentin Le Boulc'h:
```
I still have the issue with the qstat command from Parsl, which is hitting the 60s timeout. When I have 1000 jobs running, every qstat fails; I can see many processes accumulating and Parsl gets completely stuck.
I checked that I can reproduce the issue myself by running the subprocess command you are using. With several hundred jobs running, the proc.wait() waits forever. However, a manual qstat returns the output in less than a second, and after killing the proc.wait() I can see that the stdout is actually there, so the command has succeeded.
I can also reproduce the issue with a command other than `qstat`, such as a simple script returning several hundred lines.
Could you check if there is some issue with your subprocess command?
See https://docs.python.org/3/library/subprocess.html#popen-objects for instance: “This will deadlock when using stdout=PIPE or stderr=PIPE and the child process generates enough output to a pipe such that it blocks waiting for the OS pipe buffer to accept more data. Use Popen.communicate() when using pipes to avoid that.”
```
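(Aside, not part of the quoted report: a minimal, self-contained sketch of the failure mode described above and of the `communicate()` pattern the docs recommend; the 1 MB payload is an arbitrary stand-in for a large `qstat` listing.)

```python
import subprocess
import sys

# Child process that writes far more than the OS pipe buffer (typically ~64 KiB).
cmd = [sys.executable, "-c", "print('x' * 1_000_000)"]

proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

# Deadlock-prone pattern: wait() never drains the pipes, so the child blocks
# writing to a full stdout buffer while the parent blocks waiting for it to exit.
#   proc.wait(timeout=60)
#   stdout = proc.stdout.read()

# Safe pattern: communicate() keeps reading stdout/stderr while waiting.
stdout, stderr = proc.communicate(timeout=60)
print(proc.returncode, len(stdout), len(stderr))
```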
**Expected behavior**
This should not hang
**Environment**
in2p3, parsl desc branch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/channels/local/local.py`
Content:
```
1 import copy
2 import logging
3 import os
4 import shutil
5 import subprocess
6
7 from parsl.channels.base import Channel
8 from parsl.channels.errors import FileCopyException
9 from parsl.utils import RepresentationMixin
10
11 logger = logging.getLogger(__name__)
12
13
14 class LocalChannel(Channel, RepresentationMixin):
15 ''' This is not even really a channel, since opening a local shell is not heavy
16 and done so infrequently that they do not need a persistent channel
17 '''
18
19 def __init__(self, userhome=".", envs={}, script_dir=None):
20 ''' Initialize the local channel. script_dir is required by set to a default.
21
22 KwArgs:
23 - userhome (string): (default='.') This is provided as a way to override and set a specific userhome
24 - envs (dict) : A dictionary of env variables to be set when launching the shell
25 - script_dir (string): Directory to place scripts
26 '''
27 self.userhome = os.path.abspath(userhome)
28 self.hostname = "localhost"
29 self.envs = envs
30 local_env = os.environ.copy()
31 self._envs = copy.deepcopy(local_env)
32 self._envs.update(envs)
33 self.script_dir = script_dir
34
35 def execute_wait(self, cmd, walltime=None, envs={}):
36 ''' Synchronously execute a commandline string on the shell.
37
38 Args:
39 - cmd (string) : Commandline string to execute
40 - walltime (int) : walltime in seconds, this is not really used now.
41
42 Kwargs:
43 - envs (dict) : Dictionary of env variables. This will be used
44 to override the envs set at channel initialization.
45
46 Returns:
47 - retcode : Return code from the execution, -1 on fail
48 - stdout : stdout string
49 - stderr : stderr string
50
51 Raises:
52 None.
53 '''
54 retcode = -1
55 stdout = None
56 stderr = None
57
58 current_env = copy.deepcopy(self._envs)
59 current_env.update(envs)
60
61 try:
62 proc = subprocess.Popen(
63 cmd,
64 stdout=subprocess.PIPE,
65 stderr=subprocess.PIPE,
66 cwd=self.userhome,
67 env=current_env,
68 shell=True,
69 preexec_fn=os.setpgrp
70 )
71 proc.wait(timeout=walltime)
72 stdout = proc.stdout.read()
73 stderr = proc.stderr.read()
74 retcode = proc.returncode
75
76 except Exception as e:
77 logger.warning("Execution of command '{}' failed due to \n{}".format(cmd, e))
78 raise
79
80 return (retcode, stdout.decode("utf-8"), stderr.decode("utf-8"))
81
82 def push_file(self, source, dest_dir):
83 ''' If the source files dirpath is the same as dest_dir, a copy
84 is not necessary, and nothing is done. Else a copy is made.
85
86 Args:
87 - source (string) : Path to the source file
88 - dest_dir (string) : Path to the directory to which the files is to be copied
89
90 Returns:
91 - destination_path (String) : Absolute path of the destination file
92
93 Raises:
94 - FileCopyException : If file copy failed.
95 '''
96
97 local_dest = os.path.join(dest_dir, os.path.basename(source))
98
99 # Only attempt to copy if the target dir and source dir are different
100 if os.path.dirname(source) != dest_dir:
101 try:
102 shutil.copyfile(source, local_dest)
103 os.chmod(local_dest, 0o777)
104
105 except OSError as e:
106 raise FileCopyException(e, self.hostname)
107
108 else:
109 os.chmod(local_dest, 0o777)
110
111 return local_dest
112
113 def pull_file(self, remote_source, local_dir):
114 return self.push_file(remote_source, local_dir)
115
116 def close(self):
117 ''' There's nothing to close here, and this really doesn't do anything
118
119 Returns:
120 - False, because it really did not "close" this channel.
121 '''
122 return False
123
124 def isdir(self, path):
125 """Return true if the path refers to an existing directory.
126
127 Parameters
128 ----------
129 path : str
130 Path of directory to check.
131 """
132
133 return os.path.isdir(path)
134
135 def makedirs(self, path, mode=511, exist_ok=False):
136 """Create a directory.
137
138 If intermediate directories do not exist, they will be created.
139
140 Parameters
141 ----------
142 path : str
143 Path of directory to create.
144 mode : int
145 Permissions (posix-style) for the newly-created directory.
146 exist_ok : bool
147 If False, raise an OSError if the target directory already exists.
148 """
149
150 return os.makedirs(path, mode, exist_ok)
151
152 def abspath(self, path):
153 """Return the absolute path.
154
155 Parameters
156 ----------
157 path : str
158 Path for which the absolute path will be returned.
159 """
160 return os.path.abspath(path)
161
162 @property
163 def script_dir(self):
164 return self._script_dir
165
166 @script_dir.setter
167 def script_dir(self, value):
168 if value is not None:
169 value = self.abspath(value)
170 self._script_dir = value
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/channels/local/local.py b/parsl/channels/local/local.py
--- a/parsl/channels/local/local.py
+++ b/parsl/channels/local/local.py
@@ -68,9 +68,7 @@
shell=True,
preexec_fn=os.setpgrp
)
- proc.wait(timeout=walltime)
- stdout = proc.stdout.read()
- stderr = proc.stderr.read()
+ (stdout, stderr) = proc.communicate(timeout=walltime)
retcode = proc.returncode
except Exception as e:
| {"golden_diff": "diff --git a/parsl/channels/local/local.py b/parsl/channels/local/local.py\n--- a/parsl/channels/local/local.py\n+++ b/parsl/channels/local/local.py\n@@ -68,9 +68,7 @@\n shell=True,\n preexec_fn=os.setpgrp\n )\n- proc.wait(timeout=walltime)\n- stdout = proc.stdout.read()\n- stderr = proc.stderr.read()\n+ (stdout, stderr) = proc.communicate(timeout=walltime)\n retcode = proc.returncode\n \n except Exception as e:\n", "issue": "grid engine provider hangs when qstat output is large\n**Describe the bug**\r\n\r\nFrom Quentin Le Boulc'h;\r\n\r\n```\r\nI still have the issue with the qstat command from Parsl which is hitting the 60s timeout. When I have 1000 jobs running every qstat fails, I can see many processes accumulating and Parsl get completely stucked.\r\nI checked I can reproduce the issue by myself running the subprocess command you are using. With several hundreds of jobs running the proc.wait() is waiting forever. However a manual qstat returns the output in less than a second. And after killing the proc.wait() I can check that the stdout is actually here so the command has succeeded.\r\nI can also reproduce the issue with a different command than `qstat`, like a simple script returning several hundreds of lines.\r\nCould you check if there is some issue with your subprocess command?\r\nSee https://docs.python.org/3/library/subprocess.html#popen-objects for instance: \u201cThis will deadlock when using stdout=PIPE or stderr=PIPE and the child process generates enough output to a pipe such that it blocks waiting for the OS pipe buffer to accept more data. Use Popen.communicate() when using pipes to avoid that.\u201d\r\n```\r\n\r\n\r\n**Expected behavior**\r\nThis should not hang\r\n\r\n**Environment**\r\nin2p3, parsl desc branch\n", "before_files": [{"content": "import copy\nimport logging\nimport os\nimport shutil\nimport subprocess\n\nfrom parsl.channels.base import Channel\nfrom parsl.channels.errors import FileCopyException\nfrom parsl.utils import RepresentationMixin\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalChannel(Channel, RepresentationMixin):\n ''' This is not even really a channel, since opening a local shell is not heavy\n and done so infrequently that they do not need a persistent channel\n '''\n\n def __init__(self, userhome=\".\", envs={}, script_dir=None):\n ''' Initialize the local channel. script_dir is required by set to a default.\n\n KwArgs:\n - userhome (string): (default='.') This is provided as a way to override and set a specific userhome\n - envs (dict) : A dictionary of env variables to be set when launching the shell\n - script_dir (string): Directory to place scripts\n '''\n self.userhome = os.path.abspath(userhome)\n self.hostname = \"localhost\"\n self.envs = envs\n local_env = os.environ.copy()\n self._envs = copy.deepcopy(local_env)\n self._envs.update(envs)\n self.script_dir = script_dir\n\n def execute_wait(self, cmd, walltime=None, envs={}):\n ''' Synchronously execute a commandline string on the shell.\n\n Args:\n - cmd (string) : Commandline string to execute\n - walltime (int) : walltime in seconds, this is not really used now.\n\n Kwargs:\n - envs (dict) : Dictionary of env variables. 
This will be used\n to override the envs set at channel initialization.\n\n Returns:\n - retcode : Return code from the execution, -1 on fail\n - stdout : stdout string\n - stderr : stderr string\n\n Raises:\n None.\n '''\n retcode = -1\n stdout = None\n stderr = None\n\n current_env = copy.deepcopy(self._envs)\n current_env.update(envs)\n\n try:\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=self.userhome,\n env=current_env,\n shell=True,\n preexec_fn=os.setpgrp\n )\n proc.wait(timeout=walltime)\n stdout = proc.stdout.read()\n stderr = proc.stderr.read()\n retcode = proc.returncode\n\n except Exception as e:\n logger.warning(\"Execution of command '{}' failed due to \\n{}\".format(cmd, e))\n raise\n\n return (retcode, stdout.decode(\"utf-8\"), stderr.decode(\"utf-8\"))\n\n def push_file(self, source, dest_dir):\n ''' If the source files dirpath is the same as dest_dir, a copy\n is not necessary, and nothing is done. Else a copy is made.\n\n Args:\n - source (string) : Path to the source file\n - dest_dir (string) : Path to the directory to which the files is to be copied\n\n Returns:\n - destination_path (String) : Absolute path of the destination file\n\n Raises:\n - FileCopyException : If file copy failed.\n '''\n\n local_dest = os.path.join(dest_dir, os.path.basename(source))\n\n # Only attempt to copy if the target dir and source dir are different\n if os.path.dirname(source) != dest_dir:\n try:\n shutil.copyfile(source, local_dest)\n os.chmod(local_dest, 0o777)\n\n except OSError as e:\n raise FileCopyException(e, self.hostname)\n\n else:\n os.chmod(local_dest, 0o777)\n\n return local_dest\n\n def pull_file(self, remote_source, local_dir):\n return self.push_file(remote_source, local_dir)\n\n def close(self):\n ''' There's nothing to close here, and this really doesn't do anything\n\n Returns:\n - False, because it really did not \"close\" this channel.\n '''\n return False\n\n def isdir(self, path):\n \"\"\"Return true if the path refers to an existing directory.\n\n Parameters\n ----------\n path : str\n Path of directory to check.\n \"\"\"\n\n return os.path.isdir(path)\n\n def makedirs(self, path, mode=511, exist_ok=False):\n \"\"\"Create a directory.\n\n If intermediate directories do not exist, they will be created.\n\n Parameters\n ----------\n path : str\n Path of directory to create.\n mode : int\n Permissions (posix-style) for the newly-created directory.\n exist_ok : bool\n If False, raise an OSError if the target directory already exists.\n \"\"\"\n\n return os.makedirs(path, mode, exist_ok)\n\n def abspath(self, path):\n \"\"\"Return the absolute path.\n\n Parameters\n ----------\n path : str\n Path for which the absolute path will be returned.\n \"\"\"\n return os.path.abspath(path)\n\n @property\n def script_dir(self):\n return self._script_dir\n\n @script_dir.setter\n def script_dir(self, value):\n if value is not None:\n value = self.abspath(value)\n self._script_dir = value\n", "path": "parsl/channels/local/local.py"}], "after_files": [{"content": "import copy\nimport logging\nimport os\nimport shutil\nimport subprocess\n\nfrom parsl.channels.base import Channel\nfrom parsl.channels.errors import FileCopyException\nfrom parsl.utils import RepresentationMixin\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalChannel(Channel, RepresentationMixin):\n ''' This is not even really a channel, since opening a local shell is not heavy\n and done so infrequently that they do not need a persistent channel\n '''\n\n def 
__init__(self, userhome=\".\", envs={}, script_dir=None):\n ''' Initialize the local channel. script_dir is required by set to a default.\n\n KwArgs:\n - userhome (string): (default='.') This is provided as a way to override and set a specific userhome\n - envs (dict) : A dictionary of env variables to be set when launching the shell\n - script_dir (string): Directory to place scripts\n '''\n self.userhome = os.path.abspath(userhome)\n self.hostname = \"localhost\"\n self.envs = envs\n local_env = os.environ.copy()\n self._envs = copy.deepcopy(local_env)\n self._envs.update(envs)\n self.script_dir = script_dir\n\n def execute_wait(self, cmd, walltime=None, envs={}):\n ''' Synchronously execute a commandline string on the shell.\n\n Args:\n - cmd (string) : Commandline string to execute\n - walltime (int) : walltime in seconds, this is not really used now.\n\n Kwargs:\n - envs (dict) : Dictionary of env variables. This will be used\n to override the envs set at channel initialization.\n\n Returns:\n - retcode : Return code from the execution, -1 on fail\n - stdout : stdout string\n - stderr : stderr string\n\n Raises:\n None.\n '''\n retcode = -1\n stdout = None\n stderr = None\n\n current_env = copy.deepcopy(self._envs)\n current_env.update(envs)\n\n try:\n proc = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=self.userhome,\n env=current_env,\n shell=True,\n preexec_fn=os.setpgrp\n )\n (stdout, stderr) = proc.communicate(timeout=walltime)\n retcode = proc.returncode\n\n except Exception as e:\n logger.warning(\"Execution of command '{}' failed due to \\n{}\".format(cmd, e))\n raise\n\n return (retcode, stdout.decode(\"utf-8\"), stderr.decode(\"utf-8\"))\n\n def push_file(self, source, dest_dir):\n ''' If the source files dirpath is the same as dest_dir, a copy\n is not necessary, and nothing is done. 
Else a copy is made.\n\n Args:\n - source (string) : Path to the source file\n - dest_dir (string) : Path to the directory to which the files is to be copied\n\n Returns:\n - destination_path (String) : Absolute path of the destination file\n\n Raises:\n - FileCopyException : If file copy failed.\n '''\n\n local_dest = os.path.join(dest_dir, os.path.basename(source))\n\n # Only attempt to copy if the target dir and source dir are different\n if os.path.dirname(source) != dest_dir:\n try:\n shutil.copyfile(source, local_dest)\n os.chmod(local_dest, 0o777)\n\n except OSError as e:\n raise FileCopyException(e, self.hostname)\n\n else:\n os.chmod(local_dest, 0o777)\n\n return local_dest\n\n def pull_file(self, remote_source, local_dir):\n return self.push_file(remote_source, local_dir)\n\n def close(self):\n ''' There's nothing to close here, and this really doesn't do anything\n\n Returns:\n - False, because it really did not \"close\" this channel.\n '''\n return False\n\n def isdir(self, path):\n \"\"\"Return true if the path refers to an existing directory.\n\n Parameters\n ----------\n path : str\n Path of directory to check.\n \"\"\"\n\n return os.path.isdir(path)\n\n def makedirs(self, path, mode=511, exist_ok=False):\n \"\"\"Create a directory.\n\n If intermediate directories do not exist, they will be created.\n\n Parameters\n ----------\n path : str\n Path of directory to create.\n mode : int\n Permissions (posix-style) for the newly-created directory.\n exist_ok : bool\n If False, raise an OSError if the target directory already exists.\n \"\"\"\n\n return os.makedirs(path, mode, exist_ok)\n\n def abspath(self, path):\n \"\"\"Return the absolute path.\n\n Parameters\n ----------\n path : str\n Path for which the absolute path will be returned.\n \"\"\"\n return os.path.abspath(path)\n\n @property\n def script_dir(self):\n return self._script_dir\n\n @script_dir.setter\n def script_dir(self, value):\n if value is not None:\n value = self.abspath(value)\n self._script_dir = value\n", "path": "parsl/channels/local/local.py"}]} | 2,081 | 126 |
gh_patches_debug_14865 | rasdani/github-patches | git_diff | spacetelescope__jwql-419 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make JWQL pip installable
Currently our `jwql` package is only installable by cloning the repository and running `setup.py`. It would be easier for users (and perhaps easier for us when distributing our code (#294)) if it were also uploaded to PyPI and `pip` installable.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.20.0'
6
7 AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '
8 AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
9
10 REQUIRES = [
11 'astropy',
12 'astroquery>=0.3.9',
13 'authlib',
14 'bokeh>=1.0',
15 'django>=2.0',
16 'jinja2',
17 'jwedb',
18 'jwst',
19 'matplotlib',
20 'numpy',
21 'numpydoc',
22 'pandas',
23 'psycopg2',
24 'pysiaf',
25 'pytest',
26 'sphinx',
27 'sqlalchemy',
28 'stsci_rtd_theme'
29 ]
30
31 setup(
32 name='jwql',
33 version=VERSION,
34 description='The JWST Quicklook Project',
35 url='https://github.com/spacetelescope/jwql.git',
36 author=AUTHORS,
37 author_email='[email protected]',
38 license='BSD',
39 keywords=['astronomy', 'python'],
40 classifiers=['Programming Language :: Python'],
41 packages=find_packages(),
42 install_requires=REQUIRES,
43 include_package_data=True,
44 include_dirs=[np.get_include()],
45 )
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,8 +4,10 @@
VERSION = '0.20.0'
-AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '
-AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'
+AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'
+
+DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
REQUIRES = [
'astropy',
@@ -31,7 +33,7 @@
setup(
name='jwql',
version=VERSION,
- description='The JWST Quicklook Project',
+ description=DESCRIPTION,
url='https://github.com/spacetelescope/jwql.git',
author=AUTHORS,
author_email='[email protected]',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,8 +4,10 @@\n \n VERSION = '0.20.0'\n \n-AUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '\n-AUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n+AUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\n+AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'\n+\n+DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n \n REQUIRES = [\n 'astropy',\n@@ -31,7 +33,7 @@\n setup(\n name='jwql',\n version=VERSION,\n- description='The JWST Quicklook Project',\n+ description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n", "issue": "Make JWQL pip installable\nCurrently our `jwql` package is only installable by cloning the repository and running `setup.py`. It would be easier for users (and perhaps easier for us when distributing our code (#294)) if it were also uploaded to PyPI and `pip` installable. \n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.20.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, '\nAUTHORS += 'Graham Kanarek, Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = [\n 'astropy',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'django>=2.0',\n 'jinja2',\n 'jwedb',\n 'jwst',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description='The JWST Quicklook Project',\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.20.0'\n\nAUTHORS = 'Matthew Bourque, Lauren Chambers, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nREQUIRES = [\n 'astropy',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'django>=2.0',\n 'jinja2',\n 'jwedb',\n 'jwst',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme'\n]\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 705 | 244 |
gh_patches_debug_1991 | rasdani/github-patches | git_diff | pypi__warehouse-3056 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Disable 'delete confirm' button until confirmation word is correct
We currently have a modal on `warehouse/templates/manage/settings.html` that allows the user to confirm that they want to delete their project:

The user is required to enter the project name as an extra security measure. If they get it wrong, we show them this error:

## Proposal
It would be really nice if we could `disable` the delete button until the correct project name is given, e.g.


## Notes
We will have several other delete confirmation modals on other pages, sometimes with multiple modals on a single page (e.g. delete release, delete file) - so the code will need to be written to take this into account.
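(Aside, not part of the original issue: the button disabling itself would be a few lines of JavaScript keyed off the input's value, which is not shown here; the sketch below only illustrates, in Python, the server-side comparison that still has to back up any front-end check. The helper name is invented; the `confirm_project_name` field name matches the fix adopted below.)

```python
from packaging.utils import canonicalize_name


def names_match(submitted, project_normalized_name):
    """True only when the typed name canonicalizes to the project's name."""
    return bool(submitted) and canonicalize_name(submitted) == project_normalized_name


# e.g. in the view:
#   names_match(request.POST.get("confirm_project_name", ""), project.normalized_name)
```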
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/utils/project.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from packaging.utils import canonicalize_name
14 from pyramid.httpexceptions import HTTPSeeOther
15
16 from warehouse.packaging.models import (
17 Release, Dependency, File, Role, JournalEntry, release_classifiers
18 )
19
20
21 def confirm_project(project, request, fail_route):
22 confirm = request.POST.get("confirm")
23 project_name = project.normalized_name
24 if not confirm:
25 request.session.flash(
26 "Must confirm the request.",
27 queue="error",
28 )
29 raise HTTPSeeOther(
30 request.route_path(fail_route, project_name=project_name)
31 )
32 if canonicalize_name(confirm) != project.normalized_name:
33 request.session.flash(
34 "Could not delete project - " +
35 f"{confirm!r} is not the same as {project.normalized_name!r}",
36 queue="error",
37 )
38 raise HTTPSeeOther(
39 request.route_path(fail_route, project_name=project_name)
40 )
41
42
43 def remove_project(project, request, flash=True):
44 # TODO: We don't actually delete files from the data store. We should add
45 # some kind of garbage collection at some point.
46
47 request.db.add(
48 JournalEntry(
49 name=project.name,
50 action="remove",
51 submitted_by=request.user,
52 submitted_from=request.remote_addr,
53 )
54 )
55 request.db.query(Role).filter(Role.project == project).delete()
56 request.db.query(File).filter(File.name == project.name).delete()
57 (request.db.query(Dependency).filter(Dependency.name == project.name)
58 .delete())
59 (request.db.execute(release_classifiers.delete()
60 .where(release_classifiers.c.name ==
61 project.name)))
62
63 # Load the following objects into the session and individually delete them
64 # so they are included in `session.deleted` and their cache keys are purged
65
66 # Delete releases first, otherwise they will get cascade-deleted by the
67 # project deletion and won't be purged
68 for release in (
69 request.db.query(Release)
70 .filter(Release.project == project)
71 .all()):
72 request.db.delete(release)
73
74 # Finally, delete the project
75 request.db.delete(project)
76
77 # Flush so we can repeat this multiple times if necessary
78 request.db.flush()
79
80 if flash:
81 request.session.flash(
82 f"Successfully deleted the project {project.name!r}.",
83 queue="success",
84 )
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/utils/project.py b/warehouse/utils/project.py
--- a/warehouse/utils/project.py
+++ b/warehouse/utils/project.py
@@ -19,7 +19,7 @@
def confirm_project(project, request, fail_route):
- confirm = request.POST.get("confirm")
+ confirm = request.POST.get("confirm_project_name")
project_name = project.normalized_name
if not confirm:
request.session.flash(
| {"golden_diff": "diff --git a/warehouse/utils/project.py b/warehouse/utils/project.py\n--- a/warehouse/utils/project.py\n+++ b/warehouse/utils/project.py\n@@ -19,7 +19,7 @@\n \n \n def confirm_project(project, request, fail_route):\n- confirm = request.POST.get(\"confirm\")\n+ confirm = request.POST.get(\"confirm_project_name\")\n project_name = project.normalized_name\n if not confirm:\n request.session.flash(\n", "issue": "Disable 'delete confirm' button until confirmation word is correct\nWe currently have a modal on `warehouse/templates/manage/settings.html`, that allows the user to confirm that they want to delete their project:\r\n\r\n\r\n\r\nThe user is required to enter the project name as an extra security measure. If they get it wrong, we show them this error:\r\n\r\n\r\n\r\n## Proposal\r\n\r\nIt would be really nice if we could `disable` the delete button until the correct project name is given, e.g.\r\n\r\n\r\n\r\n\r\n\r\n## Notes\r\n\r\nWe will have several other delete confirmation modals on other pages, sometimes with multiple modals on a single page (e.g. delete release, delete file) - so the code will need to be written to take this into account.\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom packaging.utils import canonicalize_name\nfrom pyramid.httpexceptions import HTTPSeeOther\n\nfrom warehouse.packaging.models import (\n Release, Dependency, File, Role, JournalEntry, release_classifiers\n)\n\n\ndef confirm_project(project, request, fail_route):\n confirm = request.POST.get(\"confirm\")\n project_name = project.normalized_name\n if not confirm:\n request.session.flash(\n \"Must confirm the request.\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(fail_route, project_name=project_name)\n )\n if canonicalize_name(confirm) != project.normalized_name:\n request.session.flash(\n \"Could not delete project - \" +\n f\"{confirm!r} is not the same as {project.normalized_name!r}\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(fail_route, project_name=project_name)\n )\n\n\ndef remove_project(project, request, flash=True):\n # TODO: We don't actually delete files from the data store. 
We should add\n # some kind of garbage collection at some point.\n\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"remove\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.query(Role).filter(Role.project == project).delete()\n request.db.query(File).filter(File.name == project.name).delete()\n (request.db.query(Dependency).filter(Dependency.name == project.name)\n .delete())\n (request.db.execute(release_classifiers.delete()\n .where(release_classifiers.c.name ==\n project.name)))\n\n # Load the following objects into the session and individually delete them\n # so they are included in `session.deleted` and their cache keys are purged\n\n # Delete releases first, otherwise they will get cascade-deleted by the\n # project deletion and won't be purged\n for release in (\n request.db.query(Release)\n .filter(Release.project == project)\n .all()):\n request.db.delete(release)\n\n # Finally, delete the project\n request.db.delete(project)\n\n # Flush so we can repeat this multiple times if necessary\n request.db.flush()\n\n if flash:\n request.session.flash(\n f\"Successfully deleted the project {project.name!r}.\",\n queue=\"success\",\n )\n", "path": "warehouse/utils/project.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom packaging.utils import canonicalize_name\nfrom pyramid.httpexceptions import HTTPSeeOther\n\nfrom warehouse.packaging.models import (\n Release, Dependency, File, Role, JournalEntry, release_classifiers\n)\n\n\ndef confirm_project(project, request, fail_route):\n confirm = request.POST.get(\"confirm_project_name\")\n project_name = project.normalized_name\n if not confirm:\n request.session.flash(\n \"Must confirm the request.\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(fail_route, project_name=project_name)\n )\n if canonicalize_name(confirm) != project.normalized_name:\n request.session.flash(\n \"Could not delete project - \" +\n f\"{confirm!r} is not the same as {project.normalized_name!r}\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(fail_route, project_name=project_name)\n )\n\n\ndef remove_project(project, request, flash=True):\n # TODO: We don't actually delete files from the data store. 
We should add\n # some kind of garbage collection at some point.\n\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"remove\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.query(Role).filter(Role.project == project).delete()\n request.db.query(File).filter(File.name == project.name).delete()\n (request.db.query(Dependency).filter(Dependency.name == project.name)\n .delete())\n (request.db.execute(release_classifiers.delete()\n .where(release_classifiers.c.name ==\n project.name)))\n\n # Load the following objects into the session and individually delete them\n # so they are included in `session.deleted` and their cache keys are purged\n\n # Delete releases first, otherwise they will get cascade-deleted by the\n # project deletion and won't be purged\n for release in (\n request.db.query(Release)\n .filter(Release.project == project)\n .all()):\n request.db.delete(release)\n\n # Finally, delete the project\n request.db.delete(project)\n\n # Flush so we can repeat this multiple times if necessary\n request.db.flush()\n\n if flash:\n request.session.flash(\n f\"Successfully deleted the project {project.name!r}.\",\n queue=\"success\",\n )\n", "path": "warehouse/utils/project.py"}]} | 1,520 | 95 |
gh_patches_debug_5915 | rasdani/github-patches | git_diff | tobymao__sqlglot-1540 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dblink domain throws ParseError for Oracle dialect
If you try to parse the statement below, it returns an "Invalid expression / unexpected token" error:
`sql_query = """ SELECT * FROM table_name@dblink_name.database_link_domain;"""
parsed = parse_one(sql_query , read='oracle')`
**Retrieved Error**

Adding a database link domain is optional, but it is sometimes needed to specify the complete service name. The syntax is as shown above.
More info here: [Oracle Docs](https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/CREATE-DATABASE-LINK.html#GUID-D966642A-B19E-449D-9968-1121AF06D793)
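(Aside, not part of the original report: a minimal reproduction script. With the tokenizer change below, which lets `@` be treated as part of an Oracle identifier, the same call is expected to parse instead of raising.)

```python
from sqlglot import parse_one

sql = "SELECT * FROM table_name@dblink_name.database_link_domain"

# Raised a ParseError for the Oracle dialect before the fix.
expr = parse_one(sql, read="oracle")

# After the fix the statement parses and can be regenerated.
print(expr.sql(dialect="oracle"))
```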
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sqlglot/dialects/oracle.py`
Content:
```
1 from __future__ import annotations
2
3 import typing as t
4
5 from sqlglot import exp, generator, parser, tokens, transforms
6 from sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql
7 from sqlglot.helper import seq_get
8 from sqlglot.tokens import TokenType
9
10
11 def _parse_xml_table(self) -> exp.XMLTable:
12 this = self._parse_string()
13
14 passing = None
15 columns = None
16
17 if self._match_text_seq("PASSING"):
18 # The BY VALUE keywords are optional and are provided for semantic clarity
19 self._match_text_seq("BY", "VALUE")
20 passing = self._parse_csv(self._parse_column)
21
22 by_ref = self._match_text_seq("RETURNING", "SEQUENCE", "BY", "REF")
23
24 if self._match_text_seq("COLUMNS"):
25 columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))
26
27 return self.expression(
28 exp.XMLTable,
29 this=this,
30 passing=passing,
31 columns=columns,
32 by_ref=by_ref,
33 )
34
35
36 class Oracle(Dialect):
37 # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212
38 # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes
39 time_mapping = {
40 "AM": "%p", # Meridian indicator with or without periods
41 "A.M.": "%p", # Meridian indicator with or without periods
42 "PM": "%p", # Meridian indicator with or without periods
43 "P.M.": "%p", # Meridian indicator with or without periods
44 "D": "%u", # Day of week (1-7)
45 "DAY": "%A", # name of day
46 "DD": "%d", # day of month (1-31)
47 "DDD": "%j", # day of year (1-366)
48 "DY": "%a", # abbreviated name of day
49 "HH": "%I", # Hour of day (1-12)
50 "HH12": "%I", # alias for HH
51 "HH24": "%H", # Hour of day (0-23)
52 "IW": "%V", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard
53 "MI": "%M", # Minute (0-59)
54 "MM": "%m", # Month (01-12; January = 01)
55 "MON": "%b", # Abbreviated name of month
56 "MONTH": "%B", # Name of month
57 "SS": "%S", # Second (0-59)
58 "WW": "%W", # Week of year (1-53)
59 "YY": "%y", # 15
60 "YYYY": "%Y", # 2015
61 }
62
63 class Parser(parser.Parser):
64 WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}
65
66 FUNCTIONS = {
67 **parser.Parser.FUNCTIONS, # type: ignore
68 "SQUARE": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),
69 }
70
71 FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
72 **parser.Parser.FUNCTION_PARSERS,
73 "XMLTABLE": _parse_xml_table,
74 }
75
76 TYPE_LITERAL_PARSERS = {
77 exp.DataType.Type.DATE: lambda self, this, _: self.expression(
78 exp.DateStrToDate, this=this
79 )
80 }
81
82 def _parse_column(self) -> t.Optional[exp.Expression]:
83 column = super()._parse_column()
84 if column:
85 column.set("join_mark", self._match(TokenType.JOIN_MARKER))
86 return column
87
88 def _parse_hint(self) -> t.Optional[exp.Expression]:
89 if self._match(TokenType.HINT):
90 start = self._curr
91 while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):
92 self._advance()
93
94 if not self._curr:
95 self.raise_error("Expected */ after HINT")
96
97 end = self._tokens[self._index - 3]
98 return exp.Hint(expressions=[self._find_sql(start, end)])
99
100 return None
101
102 class Generator(generator.Generator):
103 LOCKING_READS_SUPPORTED = True
104 JOIN_HINTS = False
105 TABLE_HINTS = False
106
107 TYPE_MAPPING = {
108 **generator.Generator.TYPE_MAPPING, # type: ignore
109 exp.DataType.Type.TINYINT: "NUMBER",
110 exp.DataType.Type.SMALLINT: "NUMBER",
111 exp.DataType.Type.INT: "NUMBER",
112 exp.DataType.Type.BIGINT: "NUMBER",
113 exp.DataType.Type.DECIMAL: "NUMBER",
114 exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
115 exp.DataType.Type.VARCHAR: "VARCHAR2",
116 exp.DataType.Type.NVARCHAR: "NVARCHAR2",
117 exp.DataType.Type.TEXT: "CLOB",
118 exp.DataType.Type.BINARY: "BLOB",
119 exp.DataType.Type.VARBINARY: "BLOB",
120 }
121
122 TRANSFORMS = {
123 **generator.Generator.TRANSFORMS, # type: ignore
124 exp.DateStrToDate: lambda self, e: self.func(
125 "TO_DATE", e.this, exp.Literal.string("YYYY-MM-DD")
126 ),
127 exp.Group: transforms.preprocess([transforms.unalias_group]),
128 exp.Hint: lambda self, e: f" /*+ {self.expressions(e).strip()} */",
129 exp.ILike: no_ilike_sql,
130 exp.IfNull: rename_func("NVL"),
131 exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),
132 exp.StrToTime: lambda self, e: f"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})",
133 exp.Subquery: lambda self, e: self.subquery_sql(e, sep=" "),
134 exp.Substring: rename_func("SUBSTR"),
135 exp.Table: lambda self, e: self.table_sql(e, sep=" "),
136 exp.TimeToStr: lambda self, e: f"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})",
137 exp.ToChar: lambda self, e: self.function_fallback_sql(e),
138 exp.Trim: trim_sql,
139 exp.UnixToTime: lambda self, e: f"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)",
140 }
141
142 PROPERTIES_LOCATION = {
143 **generator.Generator.PROPERTIES_LOCATION, # type: ignore
144 exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
145 }
146
147 LIMIT_FETCH = "FETCH"
148
149 def offset_sql(self, expression: exp.Offset) -> str:
150 return f"{super().offset_sql(expression)} ROWS"
151
152 def column_sql(self, expression: exp.Column) -> str:
153 column = super().column_sql(expression)
154 return f"{column} (+)" if expression.args.get("join_mark") else column
155
156 def xmltable_sql(self, expression: exp.XMLTable) -> str:
157 this = self.sql(expression, "this")
158 passing = self.expressions(expression, key="passing")
159 passing = f"{self.sep()}PASSING{self.seg(passing)}" if passing else ""
160 columns = self.expressions(expression, key="columns")
161 columns = f"{self.sep()}COLUMNS{self.seg(columns)}" if columns else ""
162 by_ref = (
163 f"{self.sep()}RETURNING SEQUENCE BY REF" if expression.args.get("by_ref") else ""
164 )
165 return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
166
167 class Tokenizer(tokens.Tokenizer):
168 KEYWORDS = {
169 **tokens.Tokenizer.KEYWORDS,
170 "(+)": TokenType.JOIN_MARKER,
171 "COLUMNS": TokenType.COLUMN,
172 "MATCH_RECOGNIZE": TokenType.MATCH_RECOGNIZE,
173 "MINUS": TokenType.EXCEPT,
174 "NVARCHAR2": TokenType.NVARCHAR,
175 "RETURNING": TokenType.RETURNING,
176 "START": TokenType.BEGIN,
177 "TOP": TokenType.TOP,
178 "VARCHAR2": TokenType.VARCHAR,
179 }
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py
--- a/sqlglot/dialects/oracle.py
+++ b/sqlglot/dialects/oracle.py
@@ -165,6 +165,8 @@
return f"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}"
class Tokenizer(tokens.Tokenizer):
+ VAR_SINGLE_TOKENS = {"@"}
+
KEYWORDS = {
**tokens.Tokenizer.KEYWORDS,
"(+)": TokenType.JOIN_MARKER,
| {"golden_diff": "diff --git a/sqlglot/dialects/oracle.py b/sqlglot/dialects/oracle.py\n--- a/sqlglot/dialects/oracle.py\n+++ b/sqlglot/dialects/oracle.py\n@@ -165,6 +165,8 @@\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n \n class Tokenizer(tokens.Tokenizer):\n+ VAR_SINGLE_TOKENS = {\"@\"}\n+\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n", "issue": "dblink domain throw ParseError for Oracle dialect\nIf you try to parse the statement below it returns an Invalid expression / unexpected token error\r\n`sql_query = \"\"\" SELECT * FROM table_name@dblink_name.database_link_domain;\"\"\" \r\nparsed = parse_one(sql_query , read='oracle')`\r\n\r\n**Retrieved Error**\r\n\r\n\r\nAdding DB domain link is optional but some times is needed to specify the complete service name. The syntax is as above\r\nMore info here: [Oracle Docs](https://docs.oracle.com/en/database/oracle/oracle-database/19/sqlrf/CREATE-DATABASE-LINK.html#GUID-D966642A-B19E-449D-9968-1121AF06D793)\n", "before_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _parse_xml_table(self) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(self._parse_column)\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))\n\n return self.expression(\n exp.XMLTable,\n this=this,\n passing=passing,\n columns=columns,\n by_ref=by_ref,\n )\n\n\nclass Oracle(Dialect):\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n time_mapping = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS, # type: ignore\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n 
\"XMLTABLE\": _parse_xml_table,\n }\n\n TYPE_LITERAL_PARSERS = {\n exp.DataType.Type.DATE: lambda self, this, _: self.expression(\n exp.DateStrToDate, this=this\n )\n }\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Expression]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING, # type: ignore\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS, # type: ignore\n exp.DateStrToDate: lambda self, e: self.func(\n \"TO_DATE\", e.this, exp.Literal.string(\"YYYY-MM-DD\")\n ),\n exp.Group: transforms.preprocess([transforms.unalias_group]),\n exp.Hint: lambda self, e: f\" /*+ {self.expressions(e).strip()} */\",\n exp.ILike: no_ilike_sql,\n exp.IfNull: rename_func(\"NVL\"),\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION, # type: ignore\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n LIMIT_FETCH = \"FETCH\"\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def column_sql(self, expression: exp.Column) -> str:\n column = super().column_sql(expression)\n return f\"{column} (+)\" if expression.args.get(\"join_mark\") else column\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, key=\"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, key=\"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": 
TokenType.JOIN_MARKER,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"RETURNING\": TokenType.RETURNING,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport typing as t\n\nfrom sqlglot import exp, generator, parser, tokens, transforms\nfrom sqlglot.dialects.dialect import Dialect, no_ilike_sql, rename_func, trim_sql\nfrom sqlglot.helper import seq_get\nfrom sqlglot.tokens import TokenType\n\n\ndef _parse_xml_table(self) -> exp.XMLTable:\n this = self._parse_string()\n\n passing = None\n columns = None\n\n if self._match_text_seq(\"PASSING\"):\n # The BY VALUE keywords are optional and are provided for semantic clarity\n self._match_text_seq(\"BY\", \"VALUE\")\n passing = self._parse_csv(self._parse_column)\n\n by_ref = self._match_text_seq(\"RETURNING\", \"SEQUENCE\", \"BY\", \"REF\")\n\n if self._match_text_seq(\"COLUMNS\"):\n columns = self._parse_csv(lambda: self._parse_column_def(self._parse_field(any_token=True)))\n\n return self.expression(\n exp.XMLTable,\n this=this,\n passing=passing,\n columns=columns,\n by_ref=by_ref,\n )\n\n\nclass Oracle(Dialect):\n # https://docs.oracle.com/database/121/SQLRF/sql_elements004.htm#SQLRF00212\n # https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n time_mapping = {\n \"AM\": \"%p\", # Meridian indicator with or without periods\n \"A.M.\": \"%p\", # Meridian indicator with or without periods\n \"PM\": \"%p\", # Meridian indicator with or without periods\n \"P.M.\": \"%p\", # Meridian indicator with or without periods\n \"D\": \"%u\", # Day of week (1-7)\n \"DAY\": \"%A\", # name of day\n \"DD\": \"%d\", # day of month (1-31)\n \"DDD\": \"%j\", # day of year (1-366)\n \"DY\": \"%a\", # abbreviated name of day\n \"HH\": \"%I\", # Hour of day (1-12)\n \"HH12\": \"%I\", # alias for HH\n \"HH24\": \"%H\", # Hour of day (0-23)\n \"IW\": \"%V\", # Calendar week of year (1-52 or 1-53), as defined by the ISO 8601 standard\n \"MI\": \"%M\", # Minute (0-59)\n \"MM\": \"%m\", # Month (01-12; January = 01)\n \"MON\": \"%b\", # Abbreviated name of month\n \"MONTH\": \"%B\", # Name of month\n \"SS\": \"%S\", # Second (0-59)\n \"WW\": \"%W\", # Week of year (1-53)\n \"YY\": \"%y\", # 15\n \"YYYY\": \"%Y\", # 2015\n }\n\n class Parser(parser.Parser):\n WINDOW_BEFORE_PAREN_TOKENS = {TokenType.OVER, TokenType.KEEP}\n\n FUNCTIONS = {\n **parser.Parser.FUNCTIONS, # type: ignore\n \"SQUARE\": lambda args: exp.Pow(this=seq_get(args, 0), expression=exp.Literal.number(2)),\n }\n\n FUNCTION_PARSERS: t.Dict[str, t.Callable] = {\n **parser.Parser.FUNCTION_PARSERS,\n \"XMLTABLE\": _parse_xml_table,\n }\n\n TYPE_LITERAL_PARSERS = {\n exp.DataType.Type.DATE: lambda self, this, _: self.expression(\n exp.DateStrToDate, this=this\n )\n }\n\n def _parse_column(self) -> t.Optional[exp.Expression]:\n column = super()._parse_column()\n if column:\n column.set(\"join_mark\", self._match(TokenType.JOIN_MARKER))\n return column\n\n def _parse_hint(self) -> t.Optional[exp.Expression]:\n if self._match(TokenType.HINT):\n start = self._curr\n while self._curr and not self._match_pair(TokenType.STAR, TokenType.SLASH):\n self._advance()\n\n if not self._curr:\n self.raise_error(\"Expected */ after HINT\")\n\n end = self._tokens[self._index - 3]\n return exp.Hint(expressions=[self._find_sql(start, 
end)])\n\n return None\n\n class Generator(generator.Generator):\n LOCKING_READS_SUPPORTED = True\n JOIN_HINTS = False\n TABLE_HINTS = False\n\n TYPE_MAPPING = {\n **generator.Generator.TYPE_MAPPING, # type: ignore\n exp.DataType.Type.TINYINT: \"NUMBER\",\n exp.DataType.Type.SMALLINT: \"NUMBER\",\n exp.DataType.Type.INT: \"NUMBER\",\n exp.DataType.Type.BIGINT: \"NUMBER\",\n exp.DataType.Type.DECIMAL: \"NUMBER\",\n exp.DataType.Type.DOUBLE: \"DOUBLE PRECISION\",\n exp.DataType.Type.VARCHAR: \"VARCHAR2\",\n exp.DataType.Type.NVARCHAR: \"NVARCHAR2\",\n exp.DataType.Type.TEXT: \"CLOB\",\n exp.DataType.Type.BINARY: \"BLOB\",\n exp.DataType.Type.VARBINARY: \"BLOB\",\n }\n\n TRANSFORMS = {\n **generator.Generator.TRANSFORMS, # type: ignore\n exp.DateStrToDate: lambda self, e: self.func(\n \"TO_DATE\", e.this, exp.Literal.string(\"YYYY-MM-DD\")\n ),\n exp.Group: transforms.preprocess([transforms.unalias_group]),\n exp.Hint: lambda self, e: f\" /*+ {self.expressions(e).strip()} */\",\n exp.ILike: no_ilike_sql,\n exp.IfNull: rename_func(\"NVL\"),\n exp.Select: transforms.preprocess([transforms.eliminate_distinct_on]),\n exp.StrToTime: lambda self, e: f\"TO_TIMESTAMP({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.Subquery: lambda self, e: self.subquery_sql(e, sep=\" \"),\n exp.Substring: rename_func(\"SUBSTR\"),\n exp.Table: lambda self, e: self.table_sql(e, sep=\" \"),\n exp.TimeToStr: lambda self, e: f\"TO_CHAR({self.sql(e, 'this')}, {self.format_time(e)})\",\n exp.ToChar: lambda self, e: self.function_fallback_sql(e),\n exp.Trim: trim_sql,\n exp.UnixToTime: lambda self, e: f\"TO_DATE('1970-01-01','YYYY-MM-DD') + ({self.sql(e, 'this')} / 86400)\",\n }\n\n PROPERTIES_LOCATION = {\n **generator.Generator.PROPERTIES_LOCATION, # type: ignore\n exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,\n }\n\n LIMIT_FETCH = \"FETCH\"\n\n def offset_sql(self, expression: exp.Offset) -> str:\n return f\"{super().offset_sql(expression)} ROWS\"\n\n def column_sql(self, expression: exp.Column) -> str:\n column = super().column_sql(expression)\n return f\"{column} (+)\" if expression.args.get(\"join_mark\") else column\n\n def xmltable_sql(self, expression: exp.XMLTable) -> str:\n this = self.sql(expression, \"this\")\n passing = self.expressions(expression, key=\"passing\")\n passing = f\"{self.sep()}PASSING{self.seg(passing)}\" if passing else \"\"\n columns = self.expressions(expression, key=\"columns\")\n columns = f\"{self.sep()}COLUMNS{self.seg(columns)}\" if columns else \"\"\n by_ref = (\n f\"{self.sep()}RETURNING SEQUENCE BY REF\" if expression.args.get(\"by_ref\") else \"\"\n )\n return f\"XMLTABLE({self.sep('')}{self.indent(this + passing + by_ref + columns)}{self.seg(')', sep='')}\"\n\n class Tokenizer(tokens.Tokenizer):\n VAR_SINGLE_TOKENS = {\"@\"}\n\n KEYWORDS = {\n **tokens.Tokenizer.KEYWORDS,\n \"(+)\": TokenType.JOIN_MARKER,\n \"COLUMNS\": TokenType.COLUMN,\n \"MATCH_RECOGNIZE\": TokenType.MATCH_RECOGNIZE,\n \"MINUS\": TokenType.EXCEPT,\n \"NVARCHAR2\": TokenType.NVARCHAR,\n \"RETURNING\": TokenType.RETURNING,\n \"START\": TokenType.BEGIN,\n \"TOP\": TokenType.TOP,\n \"VARCHAR2\": TokenType.VARCHAR,\n }\n", "path": "sqlglot/dialects/oracle.py"}]} | 2,784 | 137 |
gh_patches_debug_12284 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1185 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cors in serverless transform trips error
*cfn-lint version: 0.24.8
*Description of issue.*
Attempting to use CORS in an API resource causes an error.
E0001 Error transforming template: Resource with id [myserviceAwsUserApi] is invalid. Cors works only with inline Swagger specified in 'DefinitionBody' property
template_clean.yaml:1:1
Commenting out the Cors property on lines 141-143 removes the problem:
[myservice_clean.yaml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3816455/myservice_clean.yaml.txt)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/transform.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 import os
18 import logging
19 import six
20 import samtranslator
21 from samtranslator.parser import parser
22 from samtranslator.translator.translator import Translator
23 from samtranslator.public.exceptions import InvalidDocumentException
24
25 from cfnlint.helpers import load_resources, convert_dict, format_json_string
26 from cfnlint.rules import Match, TransformError
27 LOGGER = logging.getLogger('cfnlint')
28
29
30 class Transform(object):
31 """
32 Application Serverless Module tranform Wrappor.
33 Based on code from AWS SAM CLI:
34 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py
35 """
36
37 def __init__(self, filename, template, region):
38 """
39 Initialize Transform class
40 """
41 self._filename = filename
42 self._template = template
43 self._region = region
44 self._parameters = {}
45
46 self._managed_policy_map = self.load_managed_policies()
47 self._sam_parser = parser.Parser()
48
49 def template(self):
50 """Get the template"""
51 return self._template
52
53 def load_managed_policies(self):
54 """
55 Load the ManagedPolicies locally, based on the AWS-CLI:
56 https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json
57 """
58 return load_resources('data/Serverless/ManagedPolicies.json')
59
60 def _replace_local_codeuri(self):
61 """
62 Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in
63 AWS::Serverless::Api to a fake S3 Uri. This is to support running the
64 SAM Translator with valid values for these fields. If this in not done,
65 the template is invalid in the eyes of SAM Translator (the translator
66 does not support local paths)
67 """
68
69 all_resources = self._template.get('Resources', {})
70
71 for _, resource in all_resources.items():
72
73 resource_type = resource.get('Type')
74 resource_dict = resource.get('Properties')
75
76 if resource_type == 'AWS::Serverless::Function':
77
78 Transform._update_to_s3_uri('CodeUri', resource_dict)
79 auto_publish_alias = resource_dict.get('AutoPublishAlias')
80 if isinstance(auto_publish_alias, dict):
81 if len(auto_publish_alias) == 1:
82 for k, v in auto_publish_alias.items():
83 if k == 'Ref':
84 if v in self._template.get('Parameters'):
85 self._parameters[v] = 'Alias'
86 if resource_type in ['AWS::Serverless::LayerVersion']:
87 if resource_dict.get('ContentUri'):
88 Transform._update_to_s3_uri('ContentUri', resource_dict)
89 if resource_type == 'AWS::Serverless::Application':
90 if resource_dict.get('Location'):
91 resource_dict['Location'] = ''
92 Transform._update_to_s3_uri('Location', resource_dict)
93 if resource_type == 'AWS::Serverless::Api':
94 if ('DefinitionBody' not in resource_dict and
95 'Auth' not in resource_dict):
96 Transform._update_to_s3_uri('DefinitionUri', resource_dict)
97 else:
98 resource_dict['DefinitionBody'] = ''
99
100 def transform_template(self):
101 """
102 Transform the Template using the Serverless Application Model.
103 """
104 matches = []
105
106 try:
107 # Output the SAM Translator version in debug mode
108 LOGGER.info('SAM Translator: %s', samtranslator.__version__)
109
110 sam_translator = Translator(
111 managed_policy_map=self._managed_policy_map,
112 sam_parser=self._sam_parser)
113
114 self._replace_local_codeuri()
115
116 # Tell SAM to use the region we're linting in, this has to be
117 # controlled using the default AWS mechanisms, see also:
118 # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py
119 LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)
120 os.environ['AWS_DEFAULT_REGION'] = self._region
121
122 self._template = convert_dict(
123 sam_translator.translate(sam_template=self._template,
124 parameter_values=self._parameters))
125
126 LOGGER.info('Transformed template: \n%s',
127 format_json_string(self._template))
128 except InvalidDocumentException as e:
129 message = 'Error transforming template: {0}'
130 for cause in e.causes:
131 matches.append(Match(
132 1, 1,
133 1, 1,
134 self._filename,
135 TransformError(), message.format(cause.message)))
136 except Exception as e: # pylint: disable=W0703
137 LOGGER.debug('Error transforming template: %s', str(e))
138 LOGGER.debug('Stack trace: %s', e, exc_info=True)
139 message = 'Error transforming template: {0}'
140 matches.append(Match(
141 1, 1,
142 1, 1,
143 self._filename,
144 TransformError(), message.format(str(e))))
145
146 return matches
147
148 @staticmethod
149 def is_s3_uri(uri):
150 """
151 Checks the uri and determines if it is a valid S3 Uri
152 Parameters
153 ----------
154 uri str, required
155 Uri to check
156 Returns
157 -------
158 bool
159 Returns True if the uri given is an S3 uri, otherwise False
160 """
161 return isinstance(uri, six.string_types) and uri.startswith('s3://')
162
163 @staticmethod
164 def _update_to_s3_uri(
165 property_key, resource_property_dict,
166 s3_uri_value='s3://bucket/value'):
167 """
168 Updates the 'property_key' in the 'resource_property_dict' to the
169 value of 's3_uri_value'
170 Note: The function will mutate the resource_property_dict that is pass
171 in Parameters
172 ----------
173 property_key str, required
174 Key in the resource_property_dict
175 resource_property_dict dict, required
176 Property dictionary of a Resource in the template to replace
177 s3_uri_value str, optional
178 Value to update the value of the property_key to
179 """
180 uri_property = resource_property_dict.get(property_key, '.')
181
182 # ignore if dict or already an S3 Uri
183 if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property):
184 return
185
186 resource_property_dict[property_key] = s3_uri_value
187
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py
--- a/src/cfnlint/transform.py
+++ b/src/cfnlint/transform.py
@@ -92,7 +92,7 @@
Transform._update_to_s3_uri('Location', resource_dict)
if resource_type == 'AWS::Serverless::Api':
if ('DefinitionBody' not in resource_dict and
- 'Auth' not in resource_dict):
+ 'Auth' not in resource_dict and 'Cors' not in resource_dict):
Transform._update_to_s3_uri('DefinitionUri', resource_dict)
else:
resource_dict['DefinitionBody'] = ''
| {"golden_diff": "diff --git a/src/cfnlint/transform.py b/src/cfnlint/transform.py\n--- a/src/cfnlint/transform.py\n+++ b/src/cfnlint/transform.py\n@@ -92,7 +92,7 @@\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n- 'Auth' not in resource_dict):\n+ 'Auth' not in resource_dict and 'Cors' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n", "issue": "Cors in serverless transform trips error \n*cfn-lint version: 0.24.8\r\n\r\n*Description of issue.*\r\nAttempting to use CORS in API resource causes error.\r\n\r\nE0001 Error transforming template: Resource with id [myserviceAwsUserApi] is invalid. Cors works only with inline Swagger specified in 'DefinitionBody' property\r\ntemplate_clean.yaml:1:1\r\n\r\nCommenting out the Cors on lines 141-143 removes the problem:\r\n\r\n\r\n\r\n[myservice_clean.yaml.txt](https://github.com/aws-cloudformation/cfn-python-lint/files/3816455/myservice_clean.yaml.txt)\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nfrom cfnlint.helpers import load_resources, convert_dict, format_json_string\nfrom cfnlint.rules import Match, TransformError\nLOGGER = logging.getLogger('cfnlint')\n\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrappor.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resources('data/Serverless/ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. This is to support running the\n SAM Translator with valid values for these fields. 
If this in not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n 'Auth' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(sam_template=self._template,\n parameter_values=self._parameters))\n\n LOGGER.info('Transformed template: \\n%s',\n format_json_string(self._template))\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict,\n s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n resource_property_dict dict, 
required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport os\nimport logging\nimport six\nimport samtranslator\nfrom samtranslator.parser import parser\nfrom samtranslator.translator.translator import Translator\nfrom samtranslator.public.exceptions import InvalidDocumentException\n\nfrom cfnlint.helpers import load_resources, convert_dict, format_json_string\nfrom cfnlint.rules import Match, TransformError\nLOGGER = logging.getLogger('cfnlint')\n\n\nclass Transform(object):\n \"\"\"\n Application Serverless Module tranform Wrappor.\n Based on code from AWS SAM CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/commands/validate/lib/sam_template_validator.py\n \"\"\"\n\n def __init__(self, filename, template, region):\n \"\"\"\n Initialize Transform class\n \"\"\"\n self._filename = filename\n self._template = template\n self._region = region\n self._parameters = {}\n\n self._managed_policy_map = self.load_managed_policies()\n self._sam_parser = parser.Parser()\n\n def template(self):\n \"\"\"Get the template\"\"\"\n return self._template\n\n def load_managed_policies(self):\n \"\"\"\n Load the ManagedPolicies locally, based on the AWS-CLI:\n https://github.com/awslabs/aws-sam-cli/blob/develop/samcli/lib/samlib/default_managed_policies.json\n \"\"\"\n return load_resources('data/Serverless/ManagedPolicies.json')\n\n def _replace_local_codeuri(self):\n \"\"\"\n Replaces the CodeUri in AWS::Serverless::Function and DefinitionUri in\n AWS::Serverless::Api to a fake S3 Uri. This is to support running the\n SAM Translator with valid values for these fields. 
If this in not done,\n the template is invalid in the eyes of SAM Translator (the translator\n does not support local paths)\n \"\"\"\n\n all_resources = self._template.get('Resources', {})\n\n for _, resource in all_resources.items():\n\n resource_type = resource.get('Type')\n resource_dict = resource.get('Properties')\n\n if resource_type == 'AWS::Serverless::Function':\n\n Transform._update_to_s3_uri('CodeUri', resource_dict)\n auto_publish_alias = resource_dict.get('AutoPublishAlias')\n if isinstance(auto_publish_alias, dict):\n if len(auto_publish_alias) == 1:\n for k, v in auto_publish_alias.items():\n if k == 'Ref':\n if v in self._template.get('Parameters'):\n self._parameters[v] = 'Alias'\n if resource_type in ['AWS::Serverless::LayerVersion']:\n if resource_dict.get('ContentUri'):\n Transform._update_to_s3_uri('ContentUri', resource_dict)\n if resource_type == 'AWS::Serverless::Application':\n if resource_dict.get('Location'):\n resource_dict['Location'] = ''\n Transform._update_to_s3_uri('Location', resource_dict)\n if resource_type == 'AWS::Serverless::Api':\n if ('DefinitionBody' not in resource_dict and\n 'Auth' not in resource_dict and 'Cors' not in resource_dict):\n Transform._update_to_s3_uri('DefinitionUri', resource_dict)\n else:\n resource_dict['DefinitionBody'] = ''\n\n def transform_template(self):\n \"\"\"\n Transform the Template using the Serverless Application Model.\n \"\"\"\n matches = []\n\n try:\n # Output the SAM Translator version in debug mode\n LOGGER.info('SAM Translator: %s', samtranslator.__version__)\n\n sam_translator = Translator(\n managed_policy_map=self._managed_policy_map,\n sam_parser=self._sam_parser)\n\n self._replace_local_codeuri()\n\n # Tell SAM to use the region we're linting in, this has to be\n # controlled using the default AWS mechanisms, see also:\n # https://github.com/awslabs/serverless-application-model/blob/master/samtranslator/translator/arn_generator.py\n LOGGER.info('Setting AWS_DEFAULT_REGION to %s', self._region)\n os.environ['AWS_DEFAULT_REGION'] = self._region\n\n self._template = convert_dict(\n sam_translator.translate(sam_template=self._template,\n parameter_values=self._parameters))\n\n LOGGER.info('Transformed template: \\n%s',\n format_json_string(self._template))\n except InvalidDocumentException as e:\n message = 'Error transforming template: {0}'\n for cause in e.causes:\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(cause.message)))\n except Exception as e: # pylint: disable=W0703\n LOGGER.debug('Error transforming template: %s', str(e))\n LOGGER.debug('Stack trace: %s', e, exc_info=True)\n message = 'Error transforming template: {0}'\n matches.append(Match(\n 1, 1,\n 1, 1,\n self._filename,\n TransformError(), message.format(str(e))))\n\n return matches\n\n @staticmethod\n def is_s3_uri(uri):\n \"\"\"\n Checks the uri and determines if it is a valid S3 Uri\n Parameters\n ----------\n uri str, required\n Uri to check\n Returns\n -------\n bool\n Returns True if the uri given is an S3 uri, otherwise False\n \"\"\"\n return isinstance(uri, six.string_types) and uri.startswith('s3://')\n\n @staticmethod\n def _update_to_s3_uri(\n property_key, resource_property_dict,\n s3_uri_value='s3://bucket/value'):\n \"\"\"\n Updates the 'property_key' in the 'resource_property_dict' to the\n value of 's3_uri_value'\n Note: The function will mutate the resource_property_dict that is pass\n in Parameters\n ----------\n property_key str, required\n Key in the resource_property_dict\n 
resource_property_dict dict, required\n Property dictionary of a Resource in the template to replace\n s3_uri_value str, optional\n Value to update the value of the property_key to\n \"\"\"\n uri_property = resource_property_dict.get(property_key, '.')\n\n # ignore if dict or already an S3 Uri\n if isinstance(uri_property, dict) or Transform.is_s3_uri(uri_property):\n return\n\n resource_property_dict[property_key] = s3_uri_value\n", "path": "src/cfnlint/transform.py"}]} | 2,413 | 146 |
gh_patches_debug_26857 | rasdani/github-patches | git_diff | team-ocean__veros-272 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Compute initial streamfunction from initial velocity
Not knowing the numerics, and possibly not reading the docs carefully enough, it is unclear to me how to initialize the velocities in the model.
For a channel run, re-entrant in x, with initial velocity 0.1 m/s everywhere and no forcing, I tried the following in `set_initial_conditions`: `s.u = update(vs.u, at[...], 0.1 * vs.maskU[..., None])`.
The velocity signal only lasts for one time step, and then it is gone. It _does_ create small pressure perturbations that drive internal waves, but the mean flow of 0.1 m/s is immediately gone. Conversely, the initial conditions have psi=0 everywhere, and then immediately on the next time step there is a stream function, but if the units are really m^3/s it is far too small.
Was I to initialize psi at the beginning instead of u, or in addition to u?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `veros/core/external/streamfunction_init.py`
Content:
```
1 from veros import logger, veros_kernel, veros_routine, KernelOutput
2 from veros.variables import allocate
3 from veros.distributed import global_max
4 from veros.core import utilities as mainutils
5 from veros.core.operators import numpy as npx, update, at
6 from veros.core.external import island, line_integrals
7 from veros.core.external.solvers import get_linear_solver
8
9
10 @veros_routine
11 def get_isleperim(state):
12 """
13 preprocess land map using MOMs algorithm for B-grid to determine number of islands
14 """
15 from veros.state import resize_dimension
16
17 vs = state.variables
18
19 island.isleperim(state)
20
21 # now that we know the number of islands we can resize
22 # all arrays depending on that
23 nisle = int(global_max(npx.max(vs.land_map)))
24 resize_dimension(state, "isle", nisle)
25 vs.isle = npx.arange(nisle)
26
27
28 @veros_routine
29 def streamfunction_init(state):
30 """
31 prepare for island integrals
32 """
33 vs = state.variables
34 settings = state.settings
35
36 logger.info("Initializing streamfunction method")
37
38 get_isleperim(state)
39
40 vs.update(boundary_masks(state))
41
42 # populate linear solver cache
43 linear_solver = get_linear_solver(state)
44
45 """
46 precalculate time independent boundary components of streamfunction
47 """
48 forc = allocate(state.dimensions, ("xt", "yt"))
49
50 vs.psin = update(vs.psin, at[...], vs.maskZ[..., -1, npx.newaxis])
51
52 for isle in range(state.dimensions["isle"]):
53 logger.info(f" Solving for boundary contribution by island {isle:d}")
54 isle_boundary = (
55 vs.line_dir_east_mask[..., isle]
56 | vs.line_dir_west_mask[..., isle]
57 | vs.line_dir_north_mask[..., isle]
58 | vs.line_dir_south_mask[..., isle]
59 )
60 isle_sol = linear_solver.solve(state, forc, vs.psin[:, :, isle], boundary_val=isle_boundary)
61 vs.psin = update(vs.psin, at[:, :, isle], isle_sol)
62
63 vs.psin = mainutils.enforce_boundaries(vs.psin, settings.enable_cyclic_x)
64
65 line_psin_out = island_integrals(state)
66 vs.update(line_psin_out)
67
68
69 @veros_kernel
70 def island_integrals(state):
71 """
72 precalculate time independent island integrals
73 """
74 vs = state.variables
75
76 uloc = allocate(state.dimensions, ("xt", "yt", "isle"))
77 vloc = allocate(state.dimensions, ("xt", "yt", "isle"))
78
79 uloc = update(
80 uloc,
81 at[1:, 1:, :],
82 -(vs.psin[1:, 1:, :] - vs.psin[1:, :-1, :])
83 * vs.maskU[1:, 1:, -1, npx.newaxis]
84 / vs.dyt[npx.newaxis, 1:, npx.newaxis]
85 * vs.hur[1:, 1:, npx.newaxis],
86 )
87
88 vloc = update(
89 vloc,
90 at[1:, 1:, ...],
91 (vs.psin[1:, 1:, :] - vs.psin[:-1, 1:, :])
92 * vs.maskV[1:, 1:, -1, npx.newaxis]
93 / (vs.cosu[npx.newaxis, 1:, npx.newaxis] * vs.dxt[1:, npx.newaxis, npx.newaxis])
94 * vs.hvr[1:, 1:, npx.newaxis],
95 )
96
97 vs.line_psin = line_integrals.line_integrals(state, uloc=uloc, vloc=vloc, kind="full")
98 return KernelOutput(line_psin=vs.line_psin)
99
100
101 @veros_kernel
102 def boundary_masks(state):
103 """
104 now that the number of islands is known we can allocate the rest of the variables
105 """
106 vs = state.variables
107 settings = state.settings
108
109 boundary_map = vs.land_map[..., npx.newaxis] == npx.arange(1, state.dimensions["isle"] + 1)
110
111 if settings.enable_cyclic_x:
112 vs.line_dir_east_mask = update(
113 vs.line_dir_east_mask, at[2:-2, 1:-1], boundary_map[3:-1, 1:-1] & ~boundary_map[3:-1, 2:]
114 )
115 vs.line_dir_west_mask = update(
116 vs.line_dir_west_mask, at[2:-2, 1:-1], boundary_map[2:-2, 2:] & ~boundary_map[2:-2, 1:-1]
117 )
118 vs.line_dir_south_mask = update(
119 vs.line_dir_south_mask, at[2:-2, 1:-1], boundary_map[2:-2, 1:-1] & ~boundary_map[3:-1, 1:-1]
120 )
121 vs.line_dir_north_mask = update(
122 vs.line_dir_north_mask, at[2:-2, 1:-1], boundary_map[3:-1, 2:] & ~boundary_map[2:-2, 2:]
123 )
124 else:
125 vs.line_dir_east_mask = update(
126 vs.line_dir_east_mask, at[1:-1, 1:-1], boundary_map[2:, 1:-1] & ~boundary_map[2:, 2:]
127 )
128 vs.line_dir_west_mask = update(
129 vs.line_dir_west_mask, at[1:-1, 1:-1], boundary_map[1:-1, 2:] & ~boundary_map[1:-1, 1:-1]
130 )
131 vs.line_dir_south_mask = update(
132 vs.line_dir_south_mask, at[1:-1, 1:-1], boundary_map[1:-1, 1:-1] & ~boundary_map[2:, 1:-1]
133 )
134 vs.line_dir_north_mask = update(
135 vs.line_dir_north_mask, at[1:-1, 1:-1], boundary_map[2:, 2:] & ~boundary_map[1:-1, 2:]
136 )
137
138 vs.isle_boundary_mask = ~npx.any(
139 vs.line_dir_east_mask | vs.line_dir_west_mask | vs.line_dir_south_mask | vs.line_dir_north_mask, axis=2
140 )
141
142 return KernelOutput(
143 isle_boundary_mask=vs.isle_boundary_mask,
144 line_dir_east_mask=vs.line_dir_east_mask,
145 line_dir_west_mask=vs.line_dir_west_mask,
146 line_dir_south_mask=vs.line_dir_south_mask,
147 line_dir_north_mask=vs.line_dir_north_mask,
148 )
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/veros/core/external/streamfunction_init.py b/veros/core/external/streamfunction_init.py
--- a/veros/core/external/streamfunction_init.py
+++ b/veros/core/external/streamfunction_init.py
@@ -3,7 +3,7 @@
from veros.distributed import global_max
from veros.core import utilities as mainutils
from veros.core.operators import numpy as npx, update, at
-from veros.core.external import island, line_integrals
+from veros.core.external import island, line_integrals, solve_stream
from veros.core.external.solvers import get_linear_solver
@@ -65,6 +65,29 @@
line_psin_out = island_integrals(state)
vs.update(line_psin_out)
+ """
+ take care of initial velocity
+ """
+
+ # transfer initial velocity to tendency
+ vs.du = update(vs.du, at[..., vs.tau], vs.u[..., vs.tau] / settings.dt_mom / (1.5 + settings.AB_eps))
+ vs.dv = update(vs.dv, at[..., vs.tau], vs.v[..., vs.tau] / settings.dt_mom / (1.5 + settings.AB_eps))
+ vs.u = update(vs.u, at[...], 0)
+ vs.v = update(vs.v, at[...], 0)
+
+ # run streamfunction solver to determine initial barotropic and baroclinic modes
+ solve_stream.solve_streamfunction(state)
+
+ vs.psi = update(vs.psi, at[...], vs.psi[..., vs.taup1, npx.newaxis])
+ vs.u = update(
+ vs.u, at[...], mainutils.enforce_boundaries(vs.u[..., vs.taup1, npx.newaxis], settings.enable_cyclic_x)
+ )
+ vs.v = update(
+ vs.v, at[...], mainutils.enforce_boundaries(vs.v[..., vs.taup1, npx.newaxis], settings.enable_cyclic_x)
+ )
+ vs.du = update(vs.du, at[..., vs.tau], 0)
+ vs.dv = update(vs.dv, at[..., vs.tau], 0)
+
@veros_kernel
def island_integrals(state):
| {"golden_diff": "diff --git a/veros/core/external/streamfunction_init.py b/veros/core/external/streamfunction_init.py\n--- a/veros/core/external/streamfunction_init.py\n+++ b/veros/core/external/streamfunction_init.py\n@@ -3,7 +3,7 @@\n from veros.distributed import global_max\n from veros.core import utilities as mainutils\n from veros.core.operators import numpy as npx, update, at\n-from veros.core.external import island, line_integrals\n+from veros.core.external import island, line_integrals, solve_stream\n from veros.core.external.solvers import get_linear_solver\n \n \n@@ -65,6 +65,29 @@\n line_psin_out = island_integrals(state)\n vs.update(line_psin_out)\n \n+ \"\"\"\n+ take care of initial velocity\n+ \"\"\"\n+\n+ # transfer initial velocity to tendency\n+ vs.du = update(vs.du, at[..., vs.tau], vs.u[..., vs.tau] / settings.dt_mom / (1.5 + settings.AB_eps))\n+ vs.dv = update(vs.dv, at[..., vs.tau], vs.v[..., vs.tau] / settings.dt_mom / (1.5 + settings.AB_eps))\n+ vs.u = update(vs.u, at[...], 0)\n+ vs.v = update(vs.v, at[...], 0)\n+\n+ # run streamfunction solver to determine initial barotropic and baroclinic modes\n+ solve_stream.solve_streamfunction(state)\n+\n+ vs.psi = update(vs.psi, at[...], vs.psi[..., vs.taup1, npx.newaxis])\n+ vs.u = update(\n+ vs.u, at[...], mainutils.enforce_boundaries(vs.u[..., vs.taup1, npx.newaxis], settings.enable_cyclic_x)\n+ )\n+ vs.v = update(\n+ vs.v, at[...], mainutils.enforce_boundaries(vs.v[..., vs.taup1, npx.newaxis], settings.enable_cyclic_x)\n+ )\n+ vs.du = update(vs.du, at[..., vs.tau], 0)\n+ vs.dv = update(vs.dv, at[..., vs.tau], 0)\n+\n \n @veros_kernel\n def island_integrals(state):\n", "issue": "Compute initial streamfunction from initial velocity\nNot knowing the numerics, and possibly not reading the docs carefully enough, it is unclear to me how to initialize the velocities in the model. \r\n\r\nFor a channel run, re-entrant in x, with intial velocity 0.1 m/s everywhere, no forcing, I tried do in `set_initial_conditions`: `s.u = update(vs.u, at[...], 0.1 * vs.maskU[..., None])`. \r\n\r\nThe velocity signal only lasts for one time step, and then it is gone. It _does_ create a small pressure perturbations that drive internal waves, but the mean flow of 0.1 m/s is immediately gone. Conversely, the initial conditions have psi=0 everywhere, and then immediately on the next time step there is a stream function, but if the units are really m^3/s it is far too small. \r\n\r\nWas I to initialize psi at the beginning instead of u, or in addition to u? 
\n", "before_files": [{"content": "from veros import logger, veros_kernel, veros_routine, KernelOutput\nfrom veros.variables import allocate\nfrom veros.distributed import global_max\nfrom veros.core import utilities as mainutils\nfrom veros.core.operators import numpy as npx, update, at\nfrom veros.core.external import island, line_integrals\nfrom veros.core.external.solvers import get_linear_solver\n\n\n@veros_routine\ndef get_isleperim(state):\n \"\"\"\n preprocess land map using MOMs algorithm for B-grid to determine number of islands\n \"\"\"\n from veros.state import resize_dimension\n\n vs = state.variables\n\n island.isleperim(state)\n\n # now that we know the number of islands we can resize\n # all arrays depending on that\n nisle = int(global_max(npx.max(vs.land_map)))\n resize_dimension(state, \"isle\", nisle)\n vs.isle = npx.arange(nisle)\n\n\n@veros_routine\ndef streamfunction_init(state):\n \"\"\"\n prepare for island integrals\n \"\"\"\n vs = state.variables\n settings = state.settings\n\n logger.info(\"Initializing streamfunction method\")\n\n get_isleperim(state)\n\n vs.update(boundary_masks(state))\n\n # populate linear solver cache\n linear_solver = get_linear_solver(state)\n\n \"\"\"\n precalculate time independent boundary components of streamfunction\n \"\"\"\n forc = allocate(state.dimensions, (\"xt\", \"yt\"))\n\n vs.psin = update(vs.psin, at[...], vs.maskZ[..., -1, npx.newaxis])\n\n for isle in range(state.dimensions[\"isle\"]):\n logger.info(f\" Solving for boundary contribution by island {isle:d}\")\n isle_boundary = (\n vs.line_dir_east_mask[..., isle]\n | vs.line_dir_west_mask[..., isle]\n | vs.line_dir_north_mask[..., isle]\n | vs.line_dir_south_mask[..., isle]\n )\n isle_sol = linear_solver.solve(state, forc, vs.psin[:, :, isle], boundary_val=isle_boundary)\n vs.psin = update(vs.psin, at[:, :, isle], isle_sol)\n\n vs.psin = mainutils.enforce_boundaries(vs.psin, settings.enable_cyclic_x)\n\n line_psin_out = island_integrals(state)\n vs.update(line_psin_out)\n\n\n@veros_kernel\ndef island_integrals(state):\n \"\"\"\n precalculate time independent island integrals\n \"\"\"\n vs = state.variables\n\n uloc = allocate(state.dimensions, (\"xt\", \"yt\", \"isle\"))\n vloc = allocate(state.dimensions, (\"xt\", \"yt\", \"isle\"))\n\n uloc = update(\n uloc,\n at[1:, 1:, :],\n -(vs.psin[1:, 1:, :] - vs.psin[1:, :-1, :])\n * vs.maskU[1:, 1:, -1, npx.newaxis]\n / vs.dyt[npx.newaxis, 1:, npx.newaxis]\n * vs.hur[1:, 1:, npx.newaxis],\n )\n\n vloc = update(\n vloc,\n at[1:, 1:, ...],\n (vs.psin[1:, 1:, :] - vs.psin[:-1, 1:, :])\n * vs.maskV[1:, 1:, -1, npx.newaxis]\n / (vs.cosu[npx.newaxis, 1:, npx.newaxis] * vs.dxt[1:, npx.newaxis, npx.newaxis])\n * vs.hvr[1:, 1:, npx.newaxis],\n )\n\n vs.line_psin = line_integrals.line_integrals(state, uloc=uloc, vloc=vloc, kind=\"full\")\n return KernelOutput(line_psin=vs.line_psin)\n\n\n@veros_kernel\ndef boundary_masks(state):\n \"\"\"\n now that the number of islands is known we can allocate the rest of the variables\n \"\"\"\n vs = state.variables\n settings = state.settings\n\n boundary_map = vs.land_map[..., npx.newaxis] == npx.arange(1, state.dimensions[\"isle\"] + 1)\n\n if settings.enable_cyclic_x:\n vs.line_dir_east_mask = update(\n vs.line_dir_east_mask, at[2:-2, 1:-1], boundary_map[3:-1, 1:-1] & ~boundary_map[3:-1, 2:]\n )\n vs.line_dir_west_mask = update(\n vs.line_dir_west_mask, at[2:-2, 1:-1], boundary_map[2:-2, 2:] & ~boundary_map[2:-2, 1:-1]\n )\n vs.line_dir_south_mask = update(\n vs.line_dir_south_mask, at[2:-2, 1:-1], 
boundary_map[2:-2, 1:-1] & ~boundary_map[3:-1, 1:-1]\n )\n vs.line_dir_north_mask = update(\n vs.line_dir_north_mask, at[2:-2, 1:-1], boundary_map[3:-1, 2:] & ~boundary_map[2:-2, 2:]\n )\n else:\n vs.line_dir_east_mask = update(\n vs.line_dir_east_mask, at[1:-1, 1:-1], boundary_map[2:, 1:-1] & ~boundary_map[2:, 2:]\n )\n vs.line_dir_west_mask = update(\n vs.line_dir_west_mask, at[1:-1, 1:-1], boundary_map[1:-1, 2:] & ~boundary_map[1:-1, 1:-1]\n )\n vs.line_dir_south_mask = update(\n vs.line_dir_south_mask, at[1:-1, 1:-1], boundary_map[1:-1, 1:-1] & ~boundary_map[2:, 1:-1]\n )\n vs.line_dir_north_mask = update(\n vs.line_dir_north_mask, at[1:-1, 1:-1], boundary_map[2:, 2:] & ~boundary_map[1:-1, 2:]\n )\n\n vs.isle_boundary_mask = ~npx.any(\n vs.line_dir_east_mask | vs.line_dir_west_mask | vs.line_dir_south_mask | vs.line_dir_north_mask, axis=2\n )\n\n return KernelOutput(\n isle_boundary_mask=vs.isle_boundary_mask,\n line_dir_east_mask=vs.line_dir_east_mask,\n line_dir_west_mask=vs.line_dir_west_mask,\n line_dir_south_mask=vs.line_dir_south_mask,\n line_dir_north_mask=vs.line_dir_north_mask,\n )\n", "path": "veros/core/external/streamfunction_init.py"}], "after_files": [{"content": "from veros import logger, veros_kernel, veros_routine, KernelOutput\nfrom veros.variables import allocate\nfrom veros.distributed import global_max\nfrom veros.core import utilities as mainutils\nfrom veros.core.operators import numpy as npx, update, at\nfrom veros.core.external import island, line_integrals, solve_stream\nfrom veros.core.external.solvers import get_linear_solver\n\n\n@veros_routine\ndef get_isleperim(state):\n \"\"\"\n preprocess land map using MOMs algorithm for B-grid to determine number of islands\n \"\"\"\n from veros.state import resize_dimension\n\n vs = state.variables\n\n island.isleperim(state)\n\n # now that we know the number of islands we can resize\n # all arrays depending on that\n nisle = int(global_max(npx.max(vs.land_map)))\n resize_dimension(state, \"isle\", nisle)\n vs.isle = npx.arange(nisle)\n\n\n@veros_routine\ndef streamfunction_init(state):\n \"\"\"\n prepare for island integrals\n \"\"\"\n vs = state.variables\n settings = state.settings\n\n logger.info(\"Initializing streamfunction method\")\n\n get_isleperim(state)\n\n vs.update(boundary_masks(state))\n\n # populate linear solver cache\n linear_solver = get_linear_solver(state)\n\n \"\"\"\n precalculate time independent boundary components of streamfunction\n \"\"\"\n forc = allocate(state.dimensions, (\"xt\", \"yt\"))\n\n vs.psin = update(vs.psin, at[...], vs.maskZ[..., -1, npx.newaxis])\n\n for isle in range(state.dimensions[\"isle\"]):\n logger.info(f\" Solving for boundary contribution by island {isle:d}\")\n isle_boundary = (\n vs.line_dir_east_mask[..., isle]\n | vs.line_dir_west_mask[..., isle]\n | vs.line_dir_north_mask[..., isle]\n | vs.line_dir_south_mask[..., isle]\n )\n isle_sol = linear_solver.solve(state, forc, vs.psin[:, :, isle], boundary_val=isle_boundary)\n vs.psin = update(vs.psin, at[:, :, isle], isle_sol)\n\n vs.psin = mainutils.enforce_boundaries(vs.psin, settings.enable_cyclic_x)\n\n line_psin_out = island_integrals(state)\n vs.update(line_psin_out)\n\n \"\"\"\n take care of initial velocity\n \"\"\"\n\n # transfer initial velocity to tendency\n vs.du = update(vs.du, at[..., vs.tau], vs.u[..., vs.tau] / settings.dt_mom / (1.5 + settings.AB_eps))\n vs.dv = update(vs.dv, at[..., vs.tau], vs.v[..., vs.tau] / settings.dt_mom / (1.5 + settings.AB_eps))\n vs.u = update(vs.u, at[...], 0)\n vs.v = 
update(vs.v, at[...], 0)\n\n # run streamfunction solver to determine initial barotropic and baroclinic modes\n solve_stream.solve_streamfunction(state)\n\n vs.psi = update(vs.psi, at[...], vs.psi[..., vs.taup1, npx.newaxis])\n vs.u = update(\n vs.u, at[...], mainutils.enforce_boundaries(vs.u[..., vs.taup1, npx.newaxis], settings.enable_cyclic_x)\n )\n vs.v = update(\n vs.v, at[...], mainutils.enforce_boundaries(vs.v[..., vs.taup1, npx.newaxis], settings.enable_cyclic_x)\n )\n vs.du = update(vs.du, at[..., vs.tau], 0)\n vs.dv = update(vs.dv, at[..., vs.tau], 0)\n\n\n@veros_kernel\ndef island_integrals(state):\n \"\"\"\n precalculate time independent island integrals\n \"\"\"\n vs = state.variables\n\n uloc = allocate(state.dimensions, (\"xt\", \"yt\", \"isle\"))\n vloc = allocate(state.dimensions, (\"xt\", \"yt\", \"isle\"))\n\n uloc = update(\n uloc,\n at[1:, 1:, :],\n -(vs.psin[1:, 1:, :] - vs.psin[1:, :-1, :])\n * vs.maskU[1:, 1:, -1, npx.newaxis]\n / vs.dyt[npx.newaxis, 1:, npx.newaxis]\n * vs.hur[1:, 1:, npx.newaxis],\n )\n\n vloc = update(\n vloc,\n at[1:, 1:, ...],\n (vs.psin[1:, 1:, :] - vs.psin[:-1, 1:, :])\n * vs.maskV[1:, 1:, -1, npx.newaxis]\n / (vs.cosu[npx.newaxis, 1:, npx.newaxis] * vs.dxt[1:, npx.newaxis, npx.newaxis])\n * vs.hvr[1:, 1:, npx.newaxis],\n )\n\n vs.line_psin = line_integrals.line_integrals(state, uloc=uloc, vloc=vloc, kind=\"full\")\n return KernelOutput(line_psin=vs.line_psin)\n\n\n@veros_kernel\ndef boundary_masks(state):\n \"\"\"\n now that the number of islands is known we can allocate the rest of the variables\n \"\"\"\n vs = state.variables\n settings = state.settings\n\n boundary_map = vs.land_map[..., npx.newaxis] == npx.arange(1, state.dimensions[\"isle\"] + 1)\n\n if settings.enable_cyclic_x:\n vs.line_dir_east_mask = update(\n vs.line_dir_east_mask, at[2:-2, 1:-1], boundary_map[3:-1, 1:-1] & ~boundary_map[3:-1, 2:]\n )\n vs.line_dir_west_mask = update(\n vs.line_dir_west_mask, at[2:-2, 1:-1], boundary_map[2:-2, 2:] & ~boundary_map[2:-2, 1:-1]\n )\n vs.line_dir_south_mask = update(\n vs.line_dir_south_mask, at[2:-2, 1:-1], boundary_map[2:-2, 1:-1] & ~boundary_map[3:-1, 1:-1]\n )\n vs.line_dir_north_mask = update(\n vs.line_dir_north_mask, at[2:-2, 1:-1], boundary_map[3:-1, 2:] & ~boundary_map[2:-2, 2:]\n )\n else:\n vs.line_dir_east_mask = update(\n vs.line_dir_east_mask, at[1:-1, 1:-1], boundary_map[2:, 1:-1] & ~boundary_map[2:, 2:]\n )\n vs.line_dir_west_mask = update(\n vs.line_dir_west_mask, at[1:-1, 1:-1], boundary_map[1:-1, 2:] & ~boundary_map[1:-1, 1:-1]\n )\n vs.line_dir_south_mask = update(\n vs.line_dir_south_mask, at[1:-1, 1:-1], boundary_map[1:-1, 1:-1] & ~boundary_map[2:, 1:-1]\n )\n vs.line_dir_north_mask = update(\n vs.line_dir_north_mask, at[1:-1, 1:-1], boundary_map[2:, 2:] & ~boundary_map[1:-1, 2:]\n )\n\n vs.isle_boundary_mask = ~npx.any(\n vs.line_dir_east_mask | vs.line_dir_west_mask | vs.line_dir_south_mask | vs.line_dir_north_mask, axis=2\n )\n\n return KernelOutput(\n isle_boundary_mask=vs.isle_boundary_mask,\n line_dir_east_mask=vs.line_dir_east_mask,\n line_dir_west_mask=vs.line_dir_west_mask,\n line_dir_south_mask=vs.line_dir_south_mask,\n line_dir_north_mask=vs.line_dir_north_mask,\n )\n", "path": "veros/core/external/streamfunction_init.py"}]} | 2,290 | 505 |
gh_patches_debug_7415 | rasdani/github-patches | git_diff | fonttools__fonttools-2439 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ReemKufiInk crashes pyftsubset
```
pyftsubset --text=duck --output-file=/tmp/reem.otf ReemKufiInk-Bold.otf
Traceback (most recent call last):
File "/tmp/venv/bin/pyftsubset", line 8, in <module>
sys.exit(main())
File "/tmp/venv/lib/python3.9/site-packages/fontTools/misc/loggingTools.py", line 372, in wrapper
return func(*args, **kwds)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 3104, in main
subsetter.subset(font)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 2908, in subset
self._subset_glyphs(font)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 2846, in _subset_glyphs
retain = table.subset_glyphs(self)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py", line 2086, in subset_glyphs
colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList)
File "/tmp/venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py", line 6, in unbuildColrV1
unbuilder = LayerListUnbuilder(layerV1List.Paint)
AttributeError: 'NoneType' object has no attribute 'Paint'
```
TTX handles the font just fine. File from https://github.com/aliftype/reem-kufi/commits/colr-v1 at 93d6dcd693ae42bb4295701e88a07cc4d04db73c
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Lib/fontTools/colorLib/unbuilder.py`
Content:
```
1 from fontTools.ttLib.tables import otTables as ot
2 from .table_builder import TableUnbuilder
3
4
5 def unbuildColrV1(layerV1List, baseGlyphV1List):
6 unbuilder = LayerListUnbuilder(layerV1List.Paint)
7 return {
8 rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
9 for rec in baseGlyphV1List.BaseGlyphPaintRecord
10 }
11
12
13 def _flatten(lst):
14 for el in lst:
15 if isinstance(el, list):
16 yield from _flatten(el)
17 else:
18 yield el
19
20
21 class LayerListUnbuilder:
22 def __init__(self, layers):
23 self.layers = layers
24
25 callbacks = {
26 (
27 ot.Paint,
28 ot.PaintFormat.PaintColrLayers,
29 ): self._unbuildPaintColrLayers,
30 }
31 self.tableUnbuilder = TableUnbuilder(callbacks)
32
33 def unbuildPaint(self, paint):
34 assert isinstance(paint, ot.Paint)
35 return self.tableUnbuilder.unbuild(paint)
36
37 def _unbuildPaintColrLayers(self, source):
38 assert source["Format"] == ot.PaintFormat.PaintColrLayers
39
40 layers = list(
41 _flatten(
42 [
43 self.unbuildPaint(childPaint)
44 for childPaint in self.layers[
45 source["FirstLayerIndex"] : source["FirstLayerIndex"]
46 + source["NumLayers"]
47 ]
48 ]
49 )
50 )
51
52 if len(layers) == 1:
53 return layers[0]
54
55 return {"Format": source["Format"], "Layers": layers}
56
57
58 if __name__ == "__main__":
59 from pprint import pprint
60 import sys
61 from fontTools.ttLib import TTFont
62
63 try:
64 fontfile = sys.argv[1]
65 except IndexError:
66 sys.exit("usage: fonttools colorLib.unbuilder FONTFILE")
67
68 font = TTFont(fontfile)
69 colr = font["COLR"]
70 if colr.version < 1:
71 sys.exit(f"error: No COLR table version=1 found in {fontfile}")
72
73 colorGlyphs = unbuildColrV1(
74 colr.table.LayerList,
75 colr.table.BaseGlyphList,
76 ignoreVarIdx=not colr.table.VarStore,
77 )
78
79 pprint(colorGlyphs)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Lib/fontTools/colorLib/unbuilder.py b/Lib/fontTools/colorLib/unbuilder.py
--- a/Lib/fontTools/colorLib/unbuilder.py
+++ b/Lib/fontTools/colorLib/unbuilder.py
@@ -2,11 +2,14 @@
from .table_builder import TableUnbuilder
-def unbuildColrV1(layerV1List, baseGlyphV1List):
- unbuilder = LayerListUnbuilder(layerV1List.Paint)
+def unbuildColrV1(layerList, baseGlyphList):
+ layers = []
+ if layerList:
+ layers = layerList.Paint
+ unbuilder = LayerListUnbuilder(layers)
return {
rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)
- for rec in baseGlyphV1List.BaseGlyphPaintRecord
+ for rec in baseGlyphList.BaseGlyphPaintRecord
}
| {"golden_diff": "diff --git a/Lib/fontTools/colorLib/unbuilder.py b/Lib/fontTools/colorLib/unbuilder.py\n--- a/Lib/fontTools/colorLib/unbuilder.py\n+++ b/Lib/fontTools/colorLib/unbuilder.py\n@@ -2,11 +2,14 @@\n from .table_builder import TableUnbuilder\n \n \n-def unbuildColrV1(layerV1List, baseGlyphV1List):\n- unbuilder = LayerListUnbuilder(layerV1List.Paint)\n+def unbuildColrV1(layerList, baseGlyphList):\n+ layers = []\n+ if layerList:\n+ layers = layerList.Paint\n+ unbuilder = LayerListUnbuilder(layers)\n return {\n rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)\n- for rec in baseGlyphV1List.BaseGlyphPaintRecord\n+ for rec in baseGlyphList.BaseGlyphPaintRecord\n }\n", "issue": "ReemKufiInk crashes pyftsubset\n```\r\npyftsubset --text=duck --output-file=/tmp/reem.otf ReemKufiInk-Bold.otf\r\n\r\nTraceback (most recent call last):\r\n File \"/tmp/venv/bin/pyftsubset\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/misc/loggingTools.py\", line 372, in wrapper\r\n return func(*args, **kwds)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 3104, in main\r\n subsetter.subset(font)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 2908, in subset\r\n self._subset_glyphs(font)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 2846, in _subset_glyphs\r\n retain = table.subset_glyphs(self)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/subset/__init__.py\", line 2086, in subset_glyphs\r\n colorGlyphsV1 = unbuildColrV1(self.table.LayerList, self.table.BaseGlyphList)\r\n File \"/tmp/venv/lib/python3.9/site-packages/fontTools/colorLib/unbuilder.py\", line 6, in unbuildColrV1\r\n unbuilder = LayerListUnbuilder(layerV1List.Paint)\r\nAttributeError: 'NoneType' object has no attribute 'Paint'\r\n```\r\n\r\nTTX handles the font just fine. 
File from https://github.com/aliftype/reem-kufi/commits/colr-v1 at 93d6dcd693ae42bb4295701e88a07cc4d04db73c\n", "before_files": [{"content": "from fontTools.ttLib.tables import otTables as ot\nfrom .table_builder import TableUnbuilder\n\n\ndef unbuildColrV1(layerV1List, baseGlyphV1List):\n unbuilder = LayerListUnbuilder(layerV1List.Paint)\n return {\n rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)\n for rec in baseGlyphV1List.BaseGlyphPaintRecord\n }\n\n\ndef _flatten(lst):\n for el in lst:\n if isinstance(el, list):\n yield from _flatten(el)\n else:\n yield el\n\n\nclass LayerListUnbuilder:\n def __init__(self, layers):\n self.layers = layers\n\n callbacks = {\n (\n ot.Paint,\n ot.PaintFormat.PaintColrLayers,\n ): self._unbuildPaintColrLayers,\n }\n self.tableUnbuilder = TableUnbuilder(callbacks)\n\n def unbuildPaint(self, paint):\n assert isinstance(paint, ot.Paint)\n return self.tableUnbuilder.unbuild(paint)\n\n def _unbuildPaintColrLayers(self, source):\n assert source[\"Format\"] == ot.PaintFormat.PaintColrLayers\n\n layers = list(\n _flatten(\n [\n self.unbuildPaint(childPaint)\n for childPaint in self.layers[\n source[\"FirstLayerIndex\"] : source[\"FirstLayerIndex\"]\n + source[\"NumLayers\"]\n ]\n ]\n )\n )\n\n if len(layers) == 1:\n return layers[0]\n\n return {\"Format\": source[\"Format\"], \"Layers\": layers}\n\n\nif __name__ == \"__main__\":\n from pprint import pprint\n import sys\n from fontTools.ttLib import TTFont\n\n try:\n fontfile = sys.argv[1]\n except IndexError:\n sys.exit(\"usage: fonttools colorLib.unbuilder FONTFILE\")\n\n font = TTFont(fontfile)\n colr = font[\"COLR\"]\n if colr.version < 1:\n sys.exit(f\"error: No COLR table version=1 found in {fontfile}\")\n\n colorGlyphs = unbuildColrV1(\n colr.table.LayerList,\n colr.table.BaseGlyphList,\n ignoreVarIdx=not colr.table.VarStore,\n )\n\n pprint(colorGlyphs)\n", "path": "Lib/fontTools/colorLib/unbuilder.py"}], "after_files": [{"content": "from fontTools.ttLib.tables import otTables as ot\nfrom .table_builder import TableUnbuilder\n\n\ndef unbuildColrV1(layerList, baseGlyphList):\n layers = []\n if layerList:\n layers = layerList.Paint\n unbuilder = LayerListUnbuilder(layers)\n return {\n rec.BaseGlyph: unbuilder.unbuildPaint(rec.Paint)\n for rec in baseGlyphList.BaseGlyphPaintRecord\n }\n\n\ndef _flatten(lst):\n for el in lst:\n if isinstance(el, list):\n yield from _flatten(el)\n else:\n yield el\n\n\nclass LayerListUnbuilder:\n def __init__(self, layers):\n self.layers = layers\n\n callbacks = {\n (\n ot.Paint,\n ot.PaintFormat.PaintColrLayers,\n ): self._unbuildPaintColrLayers,\n }\n self.tableUnbuilder = TableUnbuilder(callbacks)\n\n def unbuildPaint(self, paint):\n assert isinstance(paint, ot.Paint)\n return self.tableUnbuilder.unbuild(paint)\n\n def _unbuildPaintColrLayers(self, source):\n assert source[\"Format\"] == ot.PaintFormat.PaintColrLayers\n\n layers = list(\n _flatten(\n [\n self.unbuildPaint(childPaint)\n for childPaint in self.layers[\n source[\"FirstLayerIndex\"] : source[\"FirstLayerIndex\"]\n + source[\"NumLayers\"]\n ]\n ]\n )\n )\n\n if len(layers) == 1:\n return layers[0]\n\n return {\"Format\": source[\"Format\"], \"Layers\": layers}\n\n\nif __name__ == \"__main__\":\n from pprint import pprint\n import sys\n from fontTools.ttLib import TTFont\n\n try:\n fontfile = sys.argv[1]\n except IndexError:\n sys.exit(\"usage: fonttools colorLib.unbuilder FONTFILE\")\n\n font = TTFont(fontfile)\n colr = font[\"COLR\"]\n if colr.version < 1:\n sys.exit(f\"error: No COLR table version=1 found in 
{fontfile}\")\n\n colorGlyphs = unbuildColrV1(\n colr.table.LayerList,\n colr.table.BaseGlyphList,\n ignoreVarIdx=not colr.table.VarStore,\n )\n\n pprint(colorGlyphs)\n", "path": "Lib/fontTools/colorLib/unbuilder.py"}]} | 1,339 | 194 |
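For the fontTools record that ends above, a minimal usage sketch of the patched `unbuildColrV1`: the font path is the hypothetical one from the issue report, and this assumes a fontTools build that already includes the `LayerList` guard from the golden diff.

```python
# Sketch only: a COLR v1 font may legitimately have LayerList = None when no
# layers are shared; with the patch this unbuilds as an empty layer list
# instead of raising AttributeError inside pyftsubset.
from fontTools.ttLib import TTFont
from fontTools.colorLib.unbuilder import unbuildColrV1

font = TTFont("ReemKufiInk-Bold.otf")  # hypothetical path from the issue
colr = font["COLR"].table

color_glyphs = unbuildColrV1(colr.LayerList, colr.BaseGlyphList)
print("unbuilt", len(color_glyphs), "color glyph paint graphs")
```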
gh_patches_debug_6818 | rasdani/github-patches | git_diff | sbi-dev__sbi-11 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Non-conditional density estimators are initialized with context_features=None, should support 0 as well
The CDEs take a constructor argument `context_features`, which defaults to None. When this is a positive integer we get a conditional density estimator (CDE); for an unconditional DE it has to be None. A value of zero should be supported as well and treated the same as None.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages, setup
2
3 exec(open("lfi/version.py").read())
4
5 setup(
6 name="lfi",
7 version=__version__,
8 description="LFI + CDE.",
9 url="https://github.com/mackelab/lfi",
10 author="Conor Durkan",
11 packages=find_packages(exclude=["tests"]),
12 license="GPLv3",
13 test_requires=["pytest", "deepdiff", "torchtestcase"],
14 install_requires=[
15 "matplotlib",
16 "numpy",
17 "pyro-ppl",
18 "scipy",
19 "tensorboard",
20 "torch",
21 "tqdm",
22 ],
23 extras_requires={"dev": ["autoflake", "black", "flake8", "isort", "pytest"]},
24 dependency_links=[],
25 )
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,12 +1,12 @@
from setuptools import find_packages, setup
-exec(open("lfi/version.py").read())
+exec(open("sbi/version.py").read())
setup(
- name="lfi",
+ name="sbi",
version=__version__,
- description="LFI + CDE.",
- url="https://github.com/mackelab/lfi",
+ description="Simulation-based inference",
+ url="https://github.com/mackelab/sbi",
author="Conor Durkan",
packages=find_packages(exclude=["tests"]),
license="GPLv3",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,12 +1,12 @@\n from setuptools import find_packages, setup\n \n-exec(open(\"lfi/version.py\").read())\n+exec(open(\"sbi/version.py\").read())\n \n setup(\n- name=\"lfi\",\n+ name=\"sbi\",\n version=__version__,\n- description=\"LFI + CDE.\",\n- url=\"https://github.com/mackelab/lfi\",\n+ description=\"Simulation-based inference\",\n+ url=\"https://github.com/mackelab/sbi\",\n author=\"Conor Durkan\",\n packages=find_packages(exclude=[\"tests\"]),\n license=\"GPLv3\",\n", "issue": "Non-conditional density estimators are initialized with context_features=None, should support 0 as well\nthe CDEs have a default input context_features which defaults to None. When this is a positive integer, we get a CDE, for a DE we need to set it to None. It should support a value of zero, though.\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\nexec(open(\"lfi/version.py\").read())\n\nsetup(\n name=\"lfi\",\n version=__version__,\n description=\"LFI + CDE.\",\n url=\"https://github.com/mackelab/lfi\",\n author=\"Conor Durkan\",\n packages=find_packages(exclude=[\"tests\"]),\n license=\"GPLv3\",\n test_requires=[\"pytest\", \"deepdiff\", \"torchtestcase\"],\n install_requires=[\n \"matplotlib\",\n \"numpy\",\n \"pyro-ppl\",\n \"scipy\",\n \"tensorboard\",\n \"torch\",\n \"tqdm\",\n ],\n extras_requires={\"dev\": [\"autoflake\", \"black\", \"flake8\", \"isort\", \"pytest\"]},\n dependency_links=[],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\n\nexec(open(\"sbi/version.py\").read())\n\nsetup(\n name=\"sbi\",\n version=__version__,\n description=\"Simulation-based inference\",\n url=\"https://github.com/mackelab/sbi\",\n author=\"Conor Durkan\",\n packages=find_packages(exclude=[\"tests\"]),\n license=\"GPLv3\",\n test_requires=[\"pytest\", \"deepdiff\", \"torchtestcase\"],\n install_requires=[\n \"matplotlib\",\n \"numpy\",\n \"pyro-ppl\",\n \"scipy\",\n \"tensorboard\",\n \"torch\",\n \"tqdm\",\n ],\n extras_requires={\"dev\": [\"autoflake\", \"black\", \"flake8\", \"isort\", \"pytest\"]},\n dependency_links=[],\n)\n", "path": "setup.py"}]} | 536 | 155 |
gh_patches_debug_834 | rasdani/github-patches | git_diff | craiga__will-of-the-prophets-26 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Clean up login form
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `will_of_the_prophets/settings/__init__.py`
Content:
```
1 """
2 Django settings for will_of_the_prophets project.
3
4 Generated by 'django-admin startproject' using Django 2.0.4.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/2.0/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/2.0/ref/settings/
11 """
12
13 import os
14
15 import django_heroku
16
17 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
18 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
19
20
21 # Quick-start development settings - unsuitable for production
22 # See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
23
24 # SECURITY WARNING: keep the secret key used in production secret!
25 SECRET_KEY = os.environ.get(
26 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')
27
28 # SECURITY WARNING: don't run with debug turned on in production!
29 DEBUG = os.environ.get('DEBUG', False)
30
31 ALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']
32
33
34 # Application definition
35
36 INSTALLED_APPS = [
37 'raven.contrib.django.raven_compat',
38 'django.contrib.admin',
39 'django.contrib.auth',
40 'django.contrib.contenttypes',
41 'django.contrib.sessions',
42 'django.contrib.messages',
43 'django.contrib.staticfiles',
44 'sass_processor',
45 'bootstrap',
46 'will_of_the_prophets',
47 ]
48
49 MIDDLEWARE = [
50 'django.middleware.security.SecurityMiddleware',
51 'django.contrib.sessions.middleware.SessionMiddleware',
52 'django.middleware.common.CommonMiddleware',
53 'django.middleware.csrf.CsrfViewMiddleware',
54 'django.contrib.auth.middleware.AuthenticationMiddleware',
55 'django.contrib.messages.middleware.MessageMiddleware',
56 'django.middleware.clickjacking.XFrameOptionsMiddleware',
57 ]
58
59 ROOT_URLCONF = 'will_of_the_prophets.urls'
60
61 TEMPLATES = [
62 {
63 'BACKEND': 'django.template.backends.django.DjangoTemplates',
64 'DIRS': [],
65 'APP_DIRS': True,
66 'OPTIONS': {
67 'context_processors': [
68 'django.template.context_processors.debug',
69 'django.template.context_processors.request',
70 'django.contrib.auth.context_processors.auth',
71 'django.contrib.messages.context_processors.messages',
72 ],
73 },
74 },
75 ]
76
77 WSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'
78
79
80 # Database
81 # https://docs.djangoproject.com/en/2.0/ref/settings/#databases
82
83 DATABASES = {
84 'default': {
85 'ENGINE': 'django.db.backends.sqlite3',
86 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
87 }
88 }
89
90
91 # Password validation
92 # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
93
94 AUTH_PASSWORD_VALIDATORS = [
95 {
96 'NAME': ('django.contrib.auth.password_validation'
97 '.UserAttributeSimilarityValidator'),
98 },
99 {
100 'NAME': ('django.contrib.auth.password_validation'
101 '.MinimumLengthValidator'),
102 },
103 {
104 'NAME': ('django.contrib.auth.password_validation'
105 '.CommonPasswordValidator'),
106 },
107 {
108 'NAME': ('django.contrib.auth.password_validation'
109 '.NumericPasswordValidator'),
110 },
111 ]
112
113
114 # Internationalization
115 # https://docs.djangoproject.com/en/2.0/topics/i18n/
116
117 LANGUAGE_CODE = 'en-us'
118
119 TIME_ZONE = 'UTC'
120
121 USE_I18N = True
122
123 USE_L10N = True
124
125 USE_TZ = True
126
127
128 # Static files (CSS, JavaScript, Images)
129 # https://docs.djangoproject.com/en/2.0/howto/static-files/
130
131 STATIC_URL = '/static/'
132
133 STATICFILES_FINDERS = [
134 'django.contrib.staticfiles.finders.FileSystemFinder',
135 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
136 # https://github.com/jrief/django-sass-processor
137 'sass_processor.finders.CssFinder',
138 ]
139
140
141 # django-sass-processor
142 # https://github.com/jrief/django-sass-processor
143 SASS_OUTPUT_STYLE = 'compressed'
144
145
146 # Configure Django App for Heroku.
147 django_heroku.settings(locals())
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py
--- a/will_of_the_prophets/settings/__init__.py
+++ b/will_of_the_prophets/settings/__init__.py
@@ -42,6 +42,7 @@
'django.contrib.messages',
'django.contrib.staticfiles',
'sass_processor',
+ 'widget_tweaks',
'bootstrap',
'will_of_the_prophets',
]
| {"golden_diff": "diff --git a/will_of_the_prophets/settings/__init__.py b/will_of_the_prophets/settings/__init__.py\n--- a/will_of_the_prophets/settings/__init__.py\n+++ b/will_of_the_prophets/settings/__init__.py\n@@ -42,6 +42,7 @@\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n+ 'widget_tweaks',\n 'bootstrap',\n 'will_of_the_prophets',\n ]\n", "issue": "Clean up login form\n\n", "before_files": [{"content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False)\n\nALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'bootstrap',\n 'will_of_the_prophets',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'will_of_the_prophets.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.MinimumLengthValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.CommonPasswordValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.NumericPasswordValidator'),\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N 
= True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # https://github.com/jrief/django-sass-processor\n 'sass_processor.finders.CssFinder',\n]\n\n\n# django-sass-processor\n# https://github.com/jrief/django-sass-processor\nSASS_OUTPUT_STYLE = 'compressed'\n\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n", "path": "will_of_the_prophets/settings/__init__.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for will_of_the_prophets project.\n\nGenerated by 'django-admin startproject' using Django 2.0.4.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/2.0/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/2.0/ref/settings/\n\"\"\"\n\nimport os\n\nimport django_heroku\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ.get(\n 'SECRET_KEY', 'bah!osmx@cpaoulc-!eohvd3ldoy*^oki#l25-v6tvq04=6npz')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get('DEBUG', False)\n\nALLOWED_HOSTS = ['*.herokuapp.com', 'localhost']\n\n\n# Application definition\n\nINSTALLED_APPS = [\n 'raven.contrib.django.raven_compat',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'sass_processor',\n 'widget_tweaks',\n 'bootstrap',\n 'will_of_the_prophets',\n]\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'will_of_the_prophets.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'will_of_the_prophets.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Password validation\n# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.UserAttributeSimilarityValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.MinimumLengthValidator'),\n },\n {\n 'NAME': ('django.contrib.auth.password_validation'\n '.CommonPasswordValidator'),\n },\n {\n 'NAME': 
('django.contrib.auth.password_validation'\n '.NumericPasswordValidator'),\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.0/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = [\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # https://github.com/jrief/django-sass-processor\n 'sass_processor.finders.CssFinder',\n]\n\n\n# django-sass-processor\n# https://github.com/jrief/django-sass-processor\nSASS_OUTPUT_STYLE = 'compressed'\n\n\n# Configure Django App for Heroku.\ndjango_heroku.settings(locals())\n", "path": "will_of_the_prophets/settings/__init__.py"}]} | 1,519 | 112 |
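The golden diff of the will-of-the-prophets record above only registers `widget_tweaks` in `INSTALLED_APPS`; the template changes that actually clean up the login form are not part of the record. Below is a hedged, standalone sketch of what django-widget-tweaks enables — the minimal settings and the Bootstrap class name are assumptions.

```python
# Assumes django and django-widget-tweaks are installed in the environment.
import django
from django.conf import settings

settings.configure(
    INSTALLED_APPS=[
        "django.contrib.auth",
        "django.contrib.contenttypes",
        "widget_tweaks",
    ],
    TEMPLATES=[{
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "APP_DIRS": True,
        "OPTIONS": {},
    }],
)
django.setup()

from django.contrib.auth.forms import AuthenticationForm
from django.template import Context, Template

# add_class comes from widget_tweaks: the template can attach CSS classes to
# the form widget instead of hand-writing <input> markup in the login form.
tpl = Template('{% load widget_tweaks %}{{ form.username|add_class:"form-control" }}')
print(tpl.render(Context({"form": AuthenticationForm()})))
```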
gh_patches_debug_30796 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[cfn-lint] E0002:Unknown exception while processing rule E2541: unhashable type: 'dict_node'
cfn-lint version: 0.16.0
I got the error message:
`[cfn-lint] E0002:Unknown exception while processing rule E2541: unhashable type: 'dict_node'`
If I put a "!Ref Name" in the source action Name: see below
```
Stages:
- Name: GitHub
Actions:
- Name: !Ref GitHubSourceRepo1
ActionTypeId:
Category: Source
Owner: Custom
Version: 1
Provider: GitHUBcustom
.
.
.
.
```
If I remove the !Ref, the cfn-lint works fine.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py`
Content:
```
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3
4 Permission is hereby granted, free of charge, to any person obtaining a copy of this
5 software and associated documentation files (the "Software"), to deal in the Software
6 without restriction, including without limitation the rights to use, copy, modify,
7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
8 permit persons to whom the Software is furnished to do so.
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
16 """
17 from cfnlint import CloudFormationLintRule
18 from cfnlint import RuleMatch
19
20
21 class CodepipelineStageActions(CloudFormationLintRule):
22 """Check if CodePipeline Stage Actions are set up properly."""
23 id = 'E2541'
24 shortdesc = 'CodePipeline Stage Actions'
25 description = 'See if CodePipeline stage actions are set correctly'
26 source_url = 'https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#pipeline-requirements'
27 tags = ['resources', 'codepipeline']
28
29 CONSTRAINTS = {
30 'AWS': {
31 'Source': {
32 'S3': {
33 'InputArtifactRange': 0,
34 'OutputArtifactRange': 1,
35 },
36 'CodeCommit': {
37 'InputArtifactRange': 0,
38 'OutputArtifactRange': 1,
39 }
40 },
41 'Test': {
42 'CodeBuild': {
43 'InputArtifactRange': (1, 5),
44 'OutputArtifactRange': (0, 5),
45 }
46 },
47 'Approval': {
48 'Manual': {
49 'InputArtifactRange': 0,
50 'OutputArtifactRange': 0,
51 }
52 },
53 'Deploy': {
54 'CloudFormation': {
55 'InputArtifactRange': (0, 10),
56 'OutputArtifactRange': (0, 1),
57 },
58 'CodeDeploy': {
59 'InputArtifactRange': 1,
60 'OutputArtifactRange': 0,
61 },
62 'ElasticBeanstalk': {
63 'InputArtifactRange': 1,
64 'OutputArtifactRange': 0,
65 },
66 'OpsWorks': {
67 'InputArtifactRange': 1,
68 'OutputArtifactRange': 0,
69 },
70 'ECS': {
71 'InputArtifactRange': 1,
72 'OutputArtifactRange': 0,
73 },
74 },
75 'Invoke': {
76 'Lambda': {
77 'InputArtifactRange': (0, 5),
78 'OutputArtifactRange': (0, 5),
79 }
80 }
81 },
82 'ThirdParty': {
83 'Source': {
84 'GitHub': {
85 'InputArtifactRange': 0,
86 'OutputArtifactRange': 1,
87 }
88 },
89 },
90 }
91
92 KEY_MAP = {
93 'InputArtifacts': 'InputArtifactRange',
94 'OutputArtifacts': 'OutputArtifactRange',
95 }
96
97 def check_artifact_counts(self, action, artifact_type, path):
98 """Check that artifact counts are within valid ranges."""
99 matches = []
100
101 action_type_id = action.get('ActionTypeId')
102 owner = action_type_id.get('Owner')
103 category = action_type_id.get('Category')
104 provider = action_type_id.get('Provider')
105
106 if isinstance(owner, dict) or isinstance(category, dict) or isinstance(provider, dict):
107 self.logger.debug('owner, category, provider need to be strings to validate. Skipping.')
108 return matches
109
110 constraints = self.CONSTRAINTS.get(owner, {}).get(category, {}).get(provider, {})
111 if not constraints:
112 return matches
113 artifact_count = len(action.get(artifact_type, []))
114
115 constraint_key = self.KEY_MAP[artifact_type]
116 if isinstance(constraints[constraint_key], tuple):
117 min_, max_ = constraints[constraint_key]
118 if not (min_ <= artifact_count <= max_):
119 message = (
120 'Action "{action}" declares {number} {artifact_type} which is not in '
121 'expected range [{a}, {b}].'
122 ).format(
123 action=action['Name'],
124 number=artifact_count,
125 artifact_type=artifact_type,
126 a=min_,
127 b=max_
128 )
129 matches.append(RuleMatch(
130 path + [artifact_type],
131 message
132 ))
133 else:
134 if artifact_count != constraints[constraint_key]:
135 message = (
136 'Action "{action}" declares {number} {artifact_type} which is not the '
137 'expected number [{a}].'
138 ).format(
139 action=action['Name'],
140 number=artifact_count,
141 artifact_type=artifact_type,
142 a=constraints[constraint_key]
143 )
144 matches.append(RuleMatch(
145 path + [artifact_type],
146 message
147 ))
148
149 return matches
150
151 def check_version(self, action, path):
152 """Check that action type version is valid."""
153 matches = []
154
155 version = action.get('ActionTypeId', {}).get('Version')
156 if isinstance(version, dict):
157 self.logger.debug('Unable to validate version when an object is used. Skipping')
158 elif version != '1':
159 message = 'For all currently supported action types, the only valid version string is "1".'
160 matches.append(RuleMatch(
161 path + ['ActionTypeId', 'Version'],
162 message
163 ))
164 return matches
165
166 def check_names_unique(self, action, path, action_names):
167 """Check that action names are unique."""
168 matches = []
169
170 if action.get('Name') in action_names:
171 message = 'All action names within a stage must be unique. ({name})'.format(
172 name=action.get('Name')
173 )
174 matches.append(RuleMatch(path + ['Name'], message))
175 action_names.add(action.get('Name'))
176
177 return matches
178
179 def match(self, cfn):
180 """Check that stage actions are set up properly."""
181 matches = []
182
183 resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])
184 for resource in resources:
185 path = resource['Path']
186 properties = resource['Value']
187
188 s_stages = properties.get_safe('Stages', path)
189 for s_stage_v, s_stage_p in s_stages:
190 if not isinstance(s_stage_v, list):
191 self.logger.debug('Stages not list. Should have been caught by generic linting.')
192 return matches
193
194 for l_i_stage, l_i_path in s_stage_v.items_safe(s_stage_p):
195 action_names = set()
196 s_actions = l_i_stage.get_safe('Actions', l_i_path)
197 for s_action_v, s_action_p in s_actions:
198 if not isinstance(s_action_v, list):
199 self.logger.debug('Actions not list. Should have been caught by generic linting.')
200 return matches
201
202 for l_i_a_action, l_i_a_path in s_action_v.items_safe(s_action_p):
203 try:
204 full_path = path + l_i_a_path
205 matches.extend(self.check_names_unique(l_i_a_action, full_path, action_names))
206 matches.extend(self.check_version(l_i_a_action, full_path))
207 matches.extend(self.check_artifact_counts(l_i_a_action, 'InputArtifacts', full_path))
208 matches.extend(self.check_artifact_counts(l_i_a_action, 'OutputArtifacts', full_path))
209 except AttributeError as err:
210 self.logger.debug('Got AttributeError. Should have been caught by generic linting. '
211 'Ignoring the error here: %s', str(err))
212
213 return matches
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py b/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py
--- a/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py
+++ b/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py
@@ -14,6 +14,7 @@
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
+import six
from cfnlint import CloudFormationLintRule
from cfnlint import RuleMatch
@@ -44,6 +45,12 @@
'OutputArtifactRange': (0, 5),
}
},
+ 'Build': {
+ 'CodeBuild': {
+ 'InputArtifactRange': (1, 5),
+ 'OutputArtifactRange': (0, 5),
+ }
+ },
'Approval': {
'Manual': {
'InputArtifactRange': 0,
@@ -167,12 +174,14 @@
"""Check that action names are unique."""
matches = []
- if action.get('Name') in action_names:
- message = 'All action names within a stage must be unique. ({name})'.format(
- name=action.get('Name')
- )
- matches.append(RuleMatch(path + ['Name'], message))
- action_names.add(action.get('Name'))
+ action_name = action.get('Name')
+ if isinstance(action_name, six.string_types):
+ if action.get('Name') in action_names:
+ message = 'All action names within a stage must be unique. ({name})'.format(
+ name=action.get('Name')
+ )
+ matches.append(RuleMatch(path + ['Name'], message))
+ action_names.add(action.get('Name'))
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py b/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py\n--- a/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py\n+++ b/src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py\n@@ -14,6 +14,7 @@\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n+import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n \n@@ -44,6 +45,12 @@\n 'OutputArtifactRange': (0, 5),\n }\n },\n+ 'Build': {\n+ 'CodeBuild': {\n+ 'InputArtifactRange': (1, 5),\n+ 'OutputArtifactRange': (0, 5),\n+ }\n+ },\n 'Approval': {\n 'Manual': {\n 'InputArtifactRange': 0,\n@@ -167,12 +174,14 @@\n \"\"\"Check that action names are unique.\"\"\"\n matches = []\n \n- if action.get('Name') in action_names:\n- message = 'All action names within a stage must be unique. ({name})'.format(\n- name=action.get('Name')\n- )\n- matches.append(RuleMatch(path + ['Name'], message))\n- action_names.add(action.get('Name'))\n+ action_name = action.get('Name')\n+ if isinstance(action_name, six.string_types):\n+ if action.get('Name') in action_names:\n+ message = 'All action names within a stage must be unique. ({name})'.format(\n+ name=action.get('Name')\n+ )\n+ matches.append(RuleMatch(path + ['Name'], message))\n+ action_names.add(action.get('Name'))\n \n return matches\n", "issue": "[cfn-lint] E0002:Unknown exception while processing rule E2541: unhashable type: 'dict_node'\n*cfn-lint version: 0.16.0\r\n\r\nI got the error message:\r\n\r\n`[cfn-lint] E0002:Unknown exception while processing rule E2541: unhashable type: 'dict_node'\r\n`\r\n\r\nIf I put a \"!Ref Name\" in the source action Name: see below\r\n \r\n```\r\nStages:\r\n - Name: GitHub\r\n Actions:\r\n - Name: !Ref GitHubSourceRepo1\r\n ActionTypeId:\r\n Category: Source\r\n Owner: Custom\r\n Version: 1\r\n Provider: GitHUBcustom\r\n.\r\n.\r\n.\r\n.\r\n```\r\n\r\nIf I remove the !Ref, the cfn-lint works fine.\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass CodepipelineStageActions(CloudFormationLintRule):\n \"\"\"Check if CodePipeline Stage Actions are set up properly.\"\"\"\n id = 'E2541'\n shortdesc = 'CodePipeline Stage Actions'\n description = 'See if CodePipeline stage actions are set correctly'\n source_url = 'https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#pipeline-requirements'\n tags = ['resources', 'codepipeline']\n\n CONSTRAINTS = {\n 'AWS': {\n 'Source': {\n 'S3': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 1,\n },\n 'CodeCommit': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 1,\n }\n },\n 'Test': {\n 'CodeBuild': {\n 'InputArtifactRange': (1, 5),\n 'OutputArtifactRange': (0, 5),\n }\n },\n 'Approval': {\n 'Manual': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 0,\n }\n },\n 'Deploy': {\n 'CloudFormation': {\n 'InputArtifactRange': (0, 10),\n 'OutputArtifactRange': (0, 1),\n },\n 'CodeDeploy': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n 'ElasticBeanstalk': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n 'OpsWorks': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n 'ECS': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n },\n 'Invoke': {\n 'Lambda': {\n 'InputArtifactRange': (0, 5),\n 'OutputArtifactRange': (0, 5),\n }\n }\n },\n 'ThirdParty': {\n 'Source': {\n 'GitHub': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 1,\n }\n },\n },\n }\n\n KEY_MAP = {\n 'InputArtifacts': 'InputArtifactRange',\n 'OutputArtifacts': 'OutputArtifactRange',\n }\n\n def check_artifact_counts(self, action, artifact_type, path):\n \"\"\"Check that artifact counts are within valid ranges.\"\"\"\n matches = []\n\n action_type_id = action.get('ActionTypeId')\n owner = action_type_id.get('Owner')\n category = action_type_id.get('Category')\n provider = action_type_id.get('Provider')\n\n if isinstance(owner, dict) or isinstance(category, dict) or isinstance(provider, dict):\n self.logger.debug('owner, category, provider need to be strings to validate. 
Skipping.')\n return matches\n\n constraints = self.CONSTRAINTS.get(owner, {}).get(category, {}).get(provider, {})\n if not constraints:\n return matches\n artifact_count = len(action.get(artifact_type, []))\n\n constraint_key = self.KEY_MAP[artifact_type]\n if isinstance(constraints[constraint_key], tuple):\n min_, max_ = constraints[constraint_key]\n if not (min_ <= artifact_count <= max_):\n message = (\n 'Action \"{action}\" declares {number} {artifact_type} which is not in '\n 'expected range [{a}, {b}].'\n ).format(\n action=action['Name'],\n number=artifact_count,\n artifact_type=artifact_type,\n a=min_,\n b=max_\n )\n matches.append(RuleMatch(\n path + [artifact_type],\n message\n ))\n else:\n if artifact_count != constraints[constraint_key]:\n message = (\n 'Action \"{action}\" declares {number} {artifact_type} which is not the '\n 'expected number [{a}].'\n ).format(\n action=action['Name'],\n number=artifact_count,\n artifact_type=artifact_type,\n a=constraints[constraint_key]\n )\n matches.append(RuleMatch(\n path + [artifact_type],\n message\n ))\n\n return matches\n\n def check_version(self, action, path):\n \"\"\"Check that action type version is valid.\"\"\"\n matches = []\n\n version = action.get('ActionTypeId', {}).get('Version')\n if isinstance(version, dict):\n self.logger.debug('Unable to validate version when an object is used. Skipping')\n elif version != '1':\n message = 'For all currently supported action types, the only valid version string is \"1\".'\n matches.append(RuleMatch(\n path + ['ActionTypeId', 'Version'],\n message\n ))\n return matches\n\n def check_names_unique(self, action, path, action_names):\n \"\"\"Check that action names are unique.\"\"\"\n matches = []\n\n if action.get('Name') in action_names:\n message = 'All action names within a stage must be unique. ({name})'.format(\n name=action.get('Name')\n )\n matches.append(RuleMatch(path + ['Name'], message))\n action_names.add(action.get('Name'))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check that stage actions are set up properly.\"\"\"\n matches = []\n\n resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])\n for resource in resources:\n path = resource['Path']\n properties = resource['Value']\n\n s_stages = properties.get_safe('Stages', path)\n for s_stage_v, s_stage_p in s_stages:\n if not isinstance(s_stage_v, list):\n self.logger.debug('Stages not list. Should have been caught by generic linting.')\n return matches\n\n for l_i_stage, l_i_path in s_stage_v.items_safe(s_stage_p):\n action_names = set()\n s_actions = l_i_stage.get_safe('Actions', l_i_path)\n for s_action_v, s_action_p in s_actions:\n if not isinstance(s_action_v, list):\n self.logger.debug('Actions not list. Should have been caught by generic linting.')\n return matches\n\n for l_i_a_action, l_i_a_path in s_action_v.items_safe(s_action_p):\n try:\n full_path = path + l_i_a_path\n matches.extend(self.check_names_unique(l_i_a_action, full_path, action_names))\n matches.extend(self.check_version(l_i_a_action, full_path))\n matches.extend(self.check_artifact_counts(l_i_a_action, 'InputArtifacts', full_path))\n matches.extend(self.check_artifact_counts(l_i_a_action, 'OutputArtifacts', full_path))\n except AttributeError as err:\n self.logger.debug('Got AttributeError. Should have been caught by generic linting. 
'\n 'Ignoring the error here: %s', str(err))\n\n return matches\n", "path": "src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass CodepipelineStageActions(CloudFormationLintRule):\n \"\"\"Check if CodePipeline Stage Actions are set up properly.\"\"\"\n id = 'E2541'\n shortdesc = 'CodePipeline Stage Actions'\n description = 'See if CodePipeline stage actions are set correctly'\n source_url = 'https://docs.aws.amazon.com/codepipeline/latest/userguide/reference-pipeline-structure.html#pipeline-requirements'\n tags = ['resources', 'codepipeline']\n\n CONSTRAINTS = {\n 'AWS': {\n 'Source': {\n 'S3': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 1,\n },\n 'CodeCommit': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 1,\n }\n },\n 'Test': {\n 'CodeBuild': {\n 'InputArtifactRange': (1, 5),\n 'OutputArtifactRange': (0, 5),\n }\n },\n 'Build': {\n 'CodeBuild': {\n 'InputArtifactRange': (1, 5),\n 'OutputArtifactRange': (0, 5),\n }\n },\n 'Approval': {\n 'Manual': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 0,\n }\n },\n 'Deploy': {\n 'CloudFormation': {\n 'InputArtifactRange': (0, 10),\n 'OutputArtifactRange': (0, 1),\n },\n 'CodeDeploy': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n 'ElasticBeanstalk': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n 'OpsWorks': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n 'ECS': {\n 'InputArtifactRange': 1,\n 'OutputArtifactRange': 0,\n },\n },\n 'Invoke': {\n 'Lambda': {\n 'InputArtifactRange': (0, 5),\n 'OutputArtifactRange': (0, 5),\n }\n }\n },\n 'ThirdParty': {\n 'Source': {\n 'GitHub': {\n 'InputArtifactRange': 0,\n 'OutputArtifactRange': 1,\n }\n },\n },\n }\n\n KEY_MAP = {\n 'InputArtifacts': 'InputArtifactRange',\n 'OutputArtifacts': 'OutputArtifactRange',\n }\n\n def check_artifact_counts(self, action, artifact_type, path):\n \"\"\"Check that artifact counts are within valid ranges.\"\"\"\n matches = []\n\n action_type_id = action.get('ActionTypeId')\n owner = action_type_id.get('Owner')\n category = action_type_id.get('Category')\n provider = action_type_id.get('Provider')\n\n if isinstance(owner, dict) or isinstance(category, dict) or isinstance(provider, dict):\n self.logger.debug('owner, category, provider need to be strings to validate. 
Skipping.')\n return matches\n\n constraints = self.CONSTRAINTS.get(owner, {}).get(category, {}).get(provider, {})\n if not constraints:\n return matches\n artifact_count = len(action.get(artifact_type, []))\n\n constraint_key = self.KEY_MAP[artifact_type]\n if isinstance(constraints[constraint_key], tuple):\n min_, max_ = constraints[constraint_key]\n if not (min_ <= artifact_count <= max_):\n message = (\n 'Action \"{action}\" declares {number} {artifact_type} which is not in '\n 'expected range [{a}, {b}].'\n ).format(\n action=action['Name'],\n number=artifact_count,\n artifact_type=artifact_type,\n a=min_,\n b=max_\n )\n matches.append(RuleMatch(\n path + [artifact_type],\n message\n ))\n else:\n if artifact_count != constraints[constraint_key]:\n message = (\n 'Action \"{action}\" declares {number} {artifact_type} which is not the '\n 'expected number [{a}].'\n ).format(\n action=action['Name'],\n number=artifact_count,\n artifact_type=artifact_type,\n a=constraints[constraint_key]\n )\n matches.append(RuleMatch(\n path + [artifact_type],\n message\n ))\n\n return matches\n\n def check_version(self, action, path):\n \"\"\"Check that action type version is valid.\"\"\"\n matches = []\n\n version = action.get('ActionTypeId', {}).get('Version')\n if isinstance(version, dict):\n self.logger.debug('Unable to validate version when an object is used. Skipping')\n elif version != '1':\n message = 'For all currently supported action types, the only valid version string is \"1\".'\n matches.append(RuleMatch(\n path + ['ActionTypeId', 'Version'],\n message\n ))\n return matches\n\n def check_names_unique(self, action, path, action_names):\n \"\"\"Check that action names are unique.\"\"\"\n matches = []\n\n action_name = action.get('Name')\n if isinstance(action_name, six.string_types):\n if action.get('Name') in action_names:\n message = 'All action names within a stage must be unique. ({name})'.format(\n name=action.get('Name')\n )\n matches.append(RuleMatch(path + ['Name'], message))\n action_names.add(action.get('Name'))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check that stage actions are set up properly.\"\"\"\n matches = []\n\n resources = cfn.get_resource_properties(['AWS::CodePipeline::Pipeline'])\n for resource in resources:\n path = resource['Path']\n properties = resource['Value']\n\n s_stages = properties.get_safe('Stages', path)\n for s_stage_v, s_stage_p in s_stages:\n if not isinstance(s_stage_v, list):\n self.logger.debug('Stages not list. Should have been caught by generic linting.')\n return matches\n\n for l_i_stage, l_i_path in s_stage_v.items_safe(s_stage_p):\n action_names = set()\n s_actions = l_i_stage.get_safe('Actions', l_i_path)\n for s_action_v, s_action_p in s_actions:\n if not isinstance(s_action_v, list):\n self.logger.debug('Actions not list. Should have been caught by generic linting.')\n return matches\n\n for l_i_a_action, l_i_a_path in s_action_v.items_safe(s_action_p):\n try:\n full_path = path + l_i_a_path\n matches.extend(self.check_names_unique(l_i_a_action, full_path, action_names))\n matches.extend(self.check_version(l_i_a_action, full_path))\n matches.extend(self.check_artifact_counts(l_i_a_action, 'InputArtifacts', full_path))\n matches.extend(self.check_artifact_counts(l_i_a_action, 'OutputArtifacts', full_path))\n except AttributeError as err:\n self.logger.debug('Got AttributeError. Should have been caught by generic linting. 
'\n 'Ignoring the error here: %s', str(err))\n\n return matches\n", "path": "src/cfnlint/rules/resources/codepipeline/CodepipelineStageActions.py"}]} | 2,677 | 421 |
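A standalone sketch of the failure mode in the cfn-lint record above and of the guard the golden diff adds. It is simplified to plain `dict`/`str`: cfn-lint's parser actually yields `dict_node`/`str_node` subclasses, and the real patch tests `six.string_types` for Python 2/3 compatibility.

```python
def check_names_unique(action, action_names):
    # With `Name: !Ref GitHubSourceRepo1` the parsed value is a mapping such as
    # {"Ref": "GitHubSourceRepo1"}; dicts are unhashable, and adding one to a
    # set is what raised "unhashable type: 'dict_node'" in the unpatched rule.
    action_name = action.get("Name")
    if isinstance(action_name, str):  # guard: only literal strings are checked
        if action_name in action_names:
            print("duplicate action name:", action_name)
        action_names.add(action_name)
    # intrinsic-function dicts are simply skipped instead of crashing the rule


names = set()
check_names_unique({"Name": {"Ref": "GitHubSourceRepo1"}}, names)  # skipped
check_names_unique({"Name": "GitHub"}, names)
check_names_unique({"Name": "GitHub"}, names)  # reports the duplicate
```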
gh_patches_debug_9798 | rasdani/github-patches | git_diff | netbox-community__netbox-15788 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New User model has a 32-bit integer `id` field
### Deployment Type
NetBox Cloud
### NetBox Version
v4.0-beta1
### Python Version
3.10
### Steps to Reproduce
1. Upgrade a v3.7 database to v4.0
2. Inspect the `users_user` table
### Expected Behavior
The `id` column of the `users_user` table should be a `bigint` (64-bit integer), like all other models in NetBox.
### Observed Behavior
The `id` column is a regular 32-bit integer. This is because we renamed the stock Django table, which uses a 32-bit integer `id` field.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/users/migrations/0005_alter_user_table.py`
Content:
```
1 from django.db import migrations
2
3
4 def update_content_types(apps, schema_editor):
5 ContentType = apps.get_model('contenttypes', 'ContentType')
6 # Delete the new ContentTypes effected by the new models in the users app
7 ContentType.objects.filter(app_label='users', model='user').delete()
8
9 # Update the app labels of the original ContentTypes for auth.User to ensure
10 # that any foreign key references are preserved
11 ContentType.objects.filter(app_label='auth', model='user').update(app_label='users')
12
13 netboxuser_ct = ContentType.objects.filter(app_label='users', model='netboxuser').first()
14 if netboxuser_ct:
15 user_ct = ContentType.objects.filter(app_label='users', model='user').first()
16 CustomField = apps.get_model('extras', 'CustomField')
17 CustomField.objects.filter(related_object_type_id=netboxuser_ct.id).update(related_object_type_id=user_ct.id)
18 netboxuser_ct.delete()
19
20
21 class Migration(migrations.Migration):
22
23 dependencies = [
24 ('users', '0002_squashed_0004'),
25 ]
26
27 operations = [
28 # The User table was originally created as 'auth_user'. Now we nullify the model's
29 # db_table option, so that it defaults to the app & model name (users_user). This
30 # causes the database table to be renamed.
31 migrations.AlterModelTable(
32 name='user',
33 table=None,
34 ),
35
36 # Rename auth_user_* sequences
37 migrations.RunSQL("ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq"),
38 migrations.RunSQL("ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq"),
39 migrations.RunSQL("ALTER TABLE auth_user_user_permissions_id_seq RENAME TO users_user_user_permissions_id_seq"),
40
41 # Rename auth_user_* indexes
42 migrations.RunSQL("ALTER INDEX auth_user_pkey RENAME TO users_user_pkey"),
43 # Hash is deterministic; generated via schema_editor._create_index_name()
44 migrations.RunSQL("ALTER INDEX auth_user_username_6821ab7c_like RENAME TO users_user_username_06e46fe6_like"),
45 migrations.RunSQL("ALTER INDEX auth_user_username_key RENAME TO users_user_username_key"),
46
47 # Update ContentTypes
48 migrations.RunPython(
49 code=update_content_types,
50 reverse_code=migrations.RunPython.noop
51 ),
52 ]
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/users/migrations/0005_alter_user_table.py b/netbox/users/migrations/0005_alter_user_table.py
--- a/netbox/users/migrations/0005_alter_user_table.py
+++ b/netbox/users/migrations/0005_alter_user_table.py
@@ -33,6 +33,9 @@
table=None,
),
+ # Convert the `id` column to a 64-bit integer (BigAutoField is implied by DEFAULT_AUTO_FIELD)
+ migrations.RunSQL("ALTER TABLE users_user ALTER COLUMN id TYPE bigint"),
+
# Rename auth_user_* sequences
migrations.RunSQL("ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq"),
migrations.RunSQL("ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq"),
| {"golden_diff": "diff --git a/netbox/users/migrations/0005_alter_user_table.py b/netbox/users/migrations/0005_alter_user_table.py\n--- a/netbox/users/migrations/0005_alter_user_table.py\n+++ b/netbox/users/migrations/0005_alter_user_table.py\n@@ -33,6 +33,9 @@\n table=None,\n ),\n \n+ # Convert the `id` column to a 64-bit integer (BigAutoField is implied by DEFAULT_AUTO_FIELD)\n+ migrations.RunSQL(\"ALTER TABLE users_user ALTER COLUMN id TYPE bigint\"),\n+\n # Rename auth_user_* sequences\n migrations.RunSQL(\"ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq\"),\n", "issue": "New User model has a 32-bit integer `id` field\n### Deployment Type\n\nNetBox Cloud\n\n### NetBox Version\n\nv4.0-beta1\n\n### Python Version\n\n3.10\n\n### Steps to Reproduce\n\n1. Upgrade a v3.7 database to v4.0\r\n2. Inspect the `users_user` table\n\n### Expected Behavior\n\nThe `id` column of the `users_user` table should be a `bigint` (64-bit integer), like all other models in NetBox.\n\n### Observed Behavior\n\nThe `id` column is a regular 32-bit integer. This is because we renamed the stock Django table, which uses a 32-bit integer `id` field.\n", "before_files": [{"content": "from django.db import migrations\n\n\ndef update_content_types(apps, schema_editor):\n ContentType = apps.get_model('contenttypes', 'ContentType')\n # Delete the new ContentTypes effected by the new models in the users app\n ContentType.objects.filter(app_label='users', model='user').delete()\n\n # Update the app labels of the original ContentTypes for auth.User to ensure\n # that any foreign key references are preserved\n ContentType.objects.filter(app_label='auth', model='user').update(app_label='users')\n\n netboxuser_ct = ContentType.objects.filter(app_label='users', model='netboxuser').first()\n if netboxuser_ct:\n user_ct = ContentType.objects.filter(app_label='users', model='user').first()\n CustomField = apps.get_model('extras', 'CustomField')\n CustomField.objects.filter(related_object_type_id=netboxuser_ct.id).update(related_object_type_id=user_ct.id)\n netboxuser_ct.delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0002_squashed_0004'),\n ]\n\n operations = [\n # The User table was originally created as 'auth_user'. Now we nullify the model's\n # db_table option, so that it defaults to the app & model name (users_user). 
This\n # causes the database table to be renamed.\n migrations.AlterModelTable(\n name='user',\n table=None,\n ),\n\n # Rename auth_user_* sequences\n migrations.RunSQL(\"ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_user_permissions_id_seq RENAME TO users_user_user_permissions_id_seq\"),\n\n # Rename auth_user_* indexes\n migrations.RunSQL(\"ALTER INDEX auth_user_pkey RENAME TO users_user_pkey\"),\n # Hash is deterministic; generated via schema_editor._create_index_name()\n migrations.RunSQL(\"ALTER INDEX auth_user_username_6821ab7c_like RENAME TO users_user_username_06e46fe6_like\"),\n migrations.RunSQL(\"ALTER INDEX auth_user_username_key RENAME TO users_user_username_key\"),\n\n # Update ContentTypes\n migrations.RunPython(\n code=update_content_types,\n reverse_code=migrations.RunPython.noop\n ),\n ]\n", "path": "netbox/users/migrations/0005_alter_user_table.py"}], "after_files": [{"content": "from django.db import migrations\n\n\ndef update_content_types(apps, schema_editor):\n ContentType = apps.get_model('contenttypes', 'ContentType')\n # Delete the new ContentTypes effected by the new models in the users app\n ContentType.objects.filter(app_label='users', model='user').delete()\n\n # Update the app labels of the original ContentTypes for auth.User to ensure\n # that any foreign key references are preserved\n ContentType.objects.filter(app_label='auth', model='user').update(app_label='users')\n\n netboxuser_ct = ContentType.objects.filter(app_label='users', model='netboxuser').first()\n if netboxuser_ct:\n user_ct = ContentType.objects.filter(app_label='users', model='user').first()\n CustomField = apps.get_model('extras', 'CustomField')\n CustomField.objects.filter(related_object_type_id=netboxuser_ct.id).update(related_object_type_id=user_ct.id)\n netboxuser_ct.delete()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0002_squashed_0004'),\n ]\n\n operations = [\n # The User table was originally created as 'auth_user'. Now we nullify the model's\n # db_table option, so that it defaults to the app & model name (users_user). This\n # causes the database table to be renamed.\n migrations.AlterModelTable(\n name='user',\n table=None,\n ),\n\n # Convert the `id` column to a 64-bit integer (BigAutoField is implied by DEFAULT_AUTO_FIELD)\n migrations.RunSQL(\"ALTER TABLE users_user ALTER COLUMN id TYPE bigint\"),\n\n # Rename auth_user_* sequences\n migrations.RunSQL(\"ALTER TABLE auth_user_groups_id_seq RENAME TO users_user_groups_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_id_seq RENAME TO users_user_id_seq\"),\n migrations.RunSQL(\"ALTER TABLE auth_user_user_permissions_id_seq RENAME TO users_user_user_permissions_id_seq\"),\n\n # Rename auth_user_* indexes\n migrations.RunSQL(\"ALTER INDEX auth_user_pkey RENAME TO users_user_pkey\"),\n # Hash is deterministic; generated via schema_editor._create_index_name()\n migrations.RunSQL(\"ALTER INDEX auth_user_username_6821ab7c_like RENAME TO users_user_username_06e46fe6_like\"),\n migrations.RunSQL(\"ALTER INDEX auth_user_username_key RENAME TO users_user_username_key\"),\n\n # Update ContentTypes\n migrations.RunPython(\n code=update_content_types,\n reverse_code=migrations.RunPython.noop\n ),\n ]\n", "path": "netbox/users/migrations/0005_alter_user_table.py"}]} | 1,037 | 183 |
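For the NetBox record above, a small verification sketch: confirming from a Django shell that `users_user.id` really became a 64-bit column once the added `RunSQL` step has run. This assumes the stock PostgreSQL backend; the query itself is plain `information_schema`.

```python
# Run inside `manage.py shell` on the upgraded NetBox database.
from django.db import connection

with connection.cursor() as cursor:
    cursor.execute(
        "SELECT data_type FROM information_schema.columns "
        "WHERE table_name = 'users_user' AND column_name = 'id'"
    )
    (data_type,) = cursor.fetchone()

# Expected: 'integer' on a v3.7-upgraded database before the fix, 'bigint' after.
print(data_type)
```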
gh_patches_debug_32866 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-4451 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation for the v2 of the configuration file
At first I was thinking of automating this given the schema, but the spec isn't very large, so we can just hand-write this without too much effort.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/doc_extensions.py`
Content:
```
1 """
2 Read the Docs documentation extensions for Sphinx
3
4 Adds the following roles:
5
6 djangosetting
7 Output an inline literal of the corresponding setting value. Useful for
8 keeping documentation up to date without editing on settings changes.
9 """
10
11 from docutils import nodes, utils
12
13 from django.conf import settings
14
15 from readthedocs.projects.models import Feature
16
17
18 def django_setting_role(typ, rawtext, text, lineno, inliner, options=None,
19 content=None):
20 """Always up to date Django settings from the application"""
21 dj_setting = getattr(settings, utils.unescape(text), 'None')
22 node = nodes.literal(dj_setting, dj_setting)
23 return [node], []
24
25
26 def feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,
27 content=None):
28 """Up to date feature flags from the application."""
29 all_features = Feature.FEATURES
30 requested_feature = utils.unescape(text)
31 for feature in all_features:
32 if requested_feature.lower() == feature[0].lower():
33 desc = nodes.Text(feature[1], feature[1])
34 return [desc], []
35
36
37 def setup(_):
38 from docutils.parsers.rst import roles
39 roles.register_local_role(
40 'djangosetting',
41 django_setting_role
42 )
43 roles.register_local_role(
44 'featureflags',
45 feature_flags_role
46 )
47
48 return {
49 'version': 'builtin',
50 'parallel_read_safe': True,
51 'parallel_write_safe': True,
52 }
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/doc_extensions.py b/docs/doc_extensions.py
--- a/docs/doc_extensions.py
+++ b/docs/doc_extensions.py
@@ -6,11 +6,14 @@
djangosetting
Output an inline literal of the corresponding setting value. Useful for
keeping documentation up to date without editing on settings changes.
-"""
-from docutils import nodes, utils
+buildpyversions
+ Output a comma separated list of the supported python versions for a
+ Read the Docs build image.
+"""
from django.conf import settings
+from docutils import nodes, utils
from readthedocs.projects.models import Feature
@@ -23,8 +26,23 @@
return [node], []
+def python_supported_versions_role(typ, rawtext, text, lineno, inliner,
+ options=None, content=None):
+ """Up to date supported python versions for each build image."""
+ image = '{}:{}'.format(settings.DOCKER_DEFAULT_IMAGE, text)
+ image_settings = settings.DOCKER_IMAGE_SETTINGS[image]
+ python_versions = image_settings['python']['supported_versions']
+ node_list = []
+ separator = ', '
+ for i, version in enumerate(python_versions):
+ node_list.append(nodes.literal(version, version))
+ if i < len(python_versions) - 1:
+ node_list.append(nodes.Text(separator))
+ return (node_list, [])
+
+
def feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,
- content=None):
+ content=None):
"""Up to date feature flags from the application."""
all_features = Feature.FEATURES
requested_feature = utils.unescape(text)
@@ -40,9 +58,13 @@
'djangosetting',
django_setting_role
)
+ roles.register_local_role(
+ 'buildpyversions',
+ python_supported_versions_role,
+ )
roles.register_local_role(
'featureflags',
- feature_flags_role
+ feature_flags_role,
)
return {
| {"golden_diff": "diff --git a/docs/doc_extensions.py b/docs/doc_extensions.py\n--- a/docs/doc_extensions.py\n+++ b/docs/doc_extensions.py\n@@ -6,11 +6,14 @@\n djangosetting\n Output an inline literal of the corresponding setting value. Useful for\n keeping documentation up to date without editing on settings changes.\n-\"\"\"\n \n-from docutils import nodes, utils\n+buildpyversions\n+ Output a comma separated list of the supported python versions for a\n+ Read the Docs build image.\n+\"\"\"\n \n from django.conf import settings\n+from docutils import nodes, utils\n \n from readthedocs.projects.models import Feature\n \n@@ -23,8 +26,23 @@\n return [node], []\n \n \n+def python_supported_versions_role(typ, rawtext, text, lineno, inliner,\n+ options=None, content=None):\n+ \"\"\"Up to date supported python versions for each build image.\"\"\"\n+ image = '{}:{}'.format(settings.DOCKER_DEFAULT_IMAGE, text)\n+ image_settings = settings.DOCKER_IMAGE_SETTINGS[image]\n+ python_versions = image_settings['python']['supported_versions']\n+ node_list = []\n+ separator = ', '\n+ for i, version in enumerate(python_versions):\n+ node_list.append(nodes.literal(version, version))\n+ if i < len(python_versions) - 1:\n+ node_list.append(nodes.Text(separator))\n+ return (node_list, [])\n+\n+\n def feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,\n- content=None):\n+ content=None):\n \"\"\"Up to date feature flags from the application.\"\"\"\n all_features = Feature.FEATURES\n requested_feature = utils.unescape(text)\n@@ -40,9 +58,13 @@\n 'djangosetting',\n django_setting_role\n )\n+ roles.register_local_role(\n+ 'buildpyversions',\n+ python_supported_versions_role,\n+ )\n roles.register_local_role(\n 'featureflags',\n- feature_flags_role\n+ feature_flags_role,\n )\n \n return {\n", "issue": "Documentation for the v2 of the configuration file\nAt first, I was thinking to automate this given the schema, but the spec isn't very large so we can just hand-write this without too much effort.\n", "before_files": [{"content": "\"\"\"\nRead the Docs documentation extensions for Sphinx\n\nAdds the following roles:\n\ndjangosetting\n Output an inline literal of the corresponding setting value. 
Useful for\n keeping documentation up to date without editing on settings changes.\n\"\"\"\n\nfrom docutils import nodes, utils\n\nfrom django.conf import settings\n\nfrom readthedocs.projects.models import Feature\n\n\ndef django_setting_role(typ, rawtext, text, lineno, inliner, options=None,\n content=None):\n \"\"\"Always up to date Django settings from the application\"\"\"\n dj_setting = getattr(settings, utils.unescape(text), 'None')\n node = nodes.literal(dj_setting, dj_setting)\n return [node], []\n\n\ndef feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,\n content=None):\n \"\"\"Up to date feature flags from the application.\"\"\"\n all_features = Feature.FEATURES\n requested_feature = utils.unescape(text)\n for feature in all_features:\n if requested_feature.lower() == feature[0].lower():\n desc = nodes.Text(feature[1], feature[1])\n return [desc], []\n\n\ndef setup(_):\n from docutils.parsers.rst import roles\n roles.register_local_role(\n 'djangosetting',\n django_setting_role\n )\n roles.register_local_role(\n 'featureflags',\n feature_flags_role\n )\n\n return {\n 'version': 'builtin',\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "docs/doc_extensions.py"}], "after_files": [{"content": "\"\"\"\nRead the Docs documentation extensions for Sphinx\n\nAdds the following roles:\n\ndjangosetting\n Output an inline literal of the corresponding setting value. Useful for\n keeping documentation up to date without editing on settings changes.\n\nbuildpyversions\n Output a comma separated list of the supported python versions for a\n Read the Docs build image.\n\"\"\"\n\nfrom django.conf import settings\nfrom docutils import nodes, utils\n\nfrom readthedocs.projects.models import Feature\n\n\ndef django_setting_role(typ, rawtext, text, lineno, inliner, options=None,\n content=None):\n \"\"\"Always up to date Django settings from the application\"\"\"\n dj_setting = getattr(settings, utils.unescape(text), 'None')\n node = nodes.literal(dj_setting, dj_setting)\n return [node], []\n\n\ndef python_supported_versions_role(typ, rawtext, text, lineno, inliner,\n options=None, content=None):\n \"\"\"Up to date supported python versions for each build image.\"\"\"\n image = '{}:{}'.format(settings.DOCKER_DEFAULT_IMAGE, text)\n image_settings = settings.DOCKER_IMAGE_SETTINGS[image]\n python_versions = image_settings['python']['supported_versions']\n node_list = []\n separator = ', '\n for i, version in enumerate(python_versions):\n node_list.append(nodes.literal(version, version))\n if i < len(python_versions) - 1:\n node_list.append(nodes.Text(separator))\n return (node_list, [])\n\n\ndef feature_flags_role(typ, rawtext, text, lineno, inliner, options=None,\n content=None):\n \"\"\"Up to date feature flags from the application.\"\"\"\n all_features = Feature.FEATURES\n requested_feature = utils.unescape(text)\n for feature in all_features:\n if requested_feature.lower() == feature[0].lower():\n desc = nodes.Text(feature[1], feature[1])\n return [desc], []\n\n\ndef setup(_):\n from docutils.parsers.rst import roles\n roles.register_local_role(\n 'djangosetting',\n django_setting_role\n )\n roles.register_local_role(\n 'buildpyversions',\n python_supported_versions_role,\n )\n roles.register_local_role(\n 'featureflags',\n feature_flags_role,\n )\n\n return {\n 'version': 'builtin',\n 'parallel_read_safe': True,\n 'parallel_write_safe': True,\n }\n", "path": "docs/doc_extensions.py"}]} | 717 | 446 |
gh_patches_debug_4629 | rasdani/github-patches | git_diff | pyro-ppl__pyro-2244 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ConditionedPlanarFlow's event_dim is wrong [bug]
--------------------------------------------------------------------------------------------------
### Issue Description
Provide a brief description of the issue.
I think the ConditionedPlanarFlow's event_dim is incorrect. I think the class needs to be set as such:
```
class ConditionedPlanarFlow(Transform):
event_dim=1 #this line is not in the current release version
def __init__(self, bias=None, u=None, w=None):
super(ConditionedPlanarFlow, self).__init__(cache_size=1)
self.bias = bias
self.u = u
self.w = w
self._cached_logDetJ = None
```
The Transform super class default is event_dim=0.
Maybe I misunderstand this class, but I notice the PlanarFlow class has event_dim=1, as do many of the other normalizing flows, and when I tried using this flow for my models my ELBO would go negative. I think the confusion is that ConditionalPlanarFlow has the event_dim=1, but that isn't setting it when the actual flows are created (from what I can tell anyways).
### Environment
For any bugs, please provide the following:
- Found on: Ubuntu
- PyTorch version: I think I was using 1.2 ...but again this is a super minor bug
You can see it in the pyro docs http://docs.pyro.ai/en/stable/_modules/pyro/distributions/transforms/planar.html#ConditionalPlanarFlow
- Pyro version: 3.6.10 (I think...pretty sure a single boolean value is pretty robust to version though)
Was originally found in...nightly build (not sure on term... I built from source basically)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/distributions/transforms/planar.py`
Content:
```
1 import math
2
3 import torch
4 import torch.nn as nn
5 from torch.distributions import constraints
6 import torch.nn.functional as F
7
8 from torch.distributions import Transform
9 from pyro.distributions.conditional import ConditionalTransformModule
10 from pyro.distributions.torch_transform import TransformModule
11 from pyro.distributions.util import copy_docs_from
12 from pyro.nn import DenseNN
13
14
15 @copy_docs_from(Transform)
16 class ConditionedPlanar(Transform):
17 def __init__(self, bias=None, u=None, w=None):
18 super(ConditionedPlanar, self).__init__(cache_size=1)
19 self.bias = bias
20 self.u = u
21 self.w = w
22 self._cached_logDetJ = None
23
24 # This method ensures that torch(u_hat, w) > -1, required for invertibility
25 def u_hat(self, u, w):
26 alpha = torch.matmul(u.unsqueeze(-2), w.unsqueeze(-1)).squeeze(-1)
27 a_prime = -1 + F.softplus(alpha)
28 return u + (a_prime - alpha) * w.div(w.pow(2).sum(dim=-1, keepdim=True))
29
30 def _call(self, x):
31 """
32 :param x: the input into the bijection
33 :type x: torch.Tensor
34 Invokes the bijection x => y; in the prototypical context of a
35 :class:`~pyro.distributions.TransformedDistribution` `x` is a sample from the base distribution (or the output
36 of a previous transform)
37 """
38
39 # x ~ (batch_size, dim_size, 1)
40 # w ~ (batch_size, 1, dim_size)
41 # bias ~ (batch_size, 1)
42 act = torch.tanh(torch.matmul(self.w.unsqueeze(-2), x.unsqueeze(-1)).squeeze(-1) + self.bias)
43 u_hat = self.u_hat(self.u, self.w)
44 y = x + u_hat * act
45
46 psi_z = (1. - act.pow(2)) * self.w
47 self._cached_logDetJ = torch.log(
48 torch.abs(1 + torch.matmul(psi_z.unsqueeze(-2), u_hat.unsqueeze(-1)).squeeze(-1).squeeze(-1)))
49
50 return y
51
52 def _inverse(self, y):
53 """
54 :param y: the output of the bijection
55 :type y: torch.Tensor
56 Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values
57 `y`; rather it assumes `y` is the result of a previously computed application of the bijector
58 to some `x` (which was cached on the forward call)
59 """
60
61 raise KeyError("ConditionalPlanar object expected to find key in intermediates cache but didn't")
62
63 def log_abs_det_jacobian(self, x, y):
64 """
65 Calculates the elementwise determinant of the log Jacobian
66 """
67 return self._cached_logDetJ
68
69
70 @copy_docs_from(ConditionedPlanar)
71 class Planar(ConditionedPlanar, TransformModule):
72 """
73 A 'planar' bijective transform with equation,
74
75 :math:`\\mathbf{y} = \\mathbf{x} + \\mathbf{u}\\tanh(\\mathbf{w}^T\\mathbf{z}+b)`
76
77 where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs, and the learnable parameters
78 are :math:`b\\in\\mathbb{R}`, :math:`\\mathbf{u}\\in\\mathbb{R}^D`, :math:`\\mathbf{w}\\in\\mathbb{R}^D` for input
79 dimension :math:`D`. For this to be an invertible transformation, the condition
80 :math:`\\mathbf{w}^T\\mathbf{u}>-1` is enforced.
81
82 Together with :class:`~pyro.distributions.TransformedDistribution` this provides a way to create richer
83 variational approximations.
84
85 Example usage:
86
87 >>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))
88 >>> transform = Planar(10)
89 >>> pyro.module("my_transform", transform) # doctest: +SKIP
90 >>> flow_dist = dist.TransformedDistribution(base_dist, [transform])
91 >>> flow_dist.sample() # doctest: +SKIP
92 tensor([-0.4071, -0.5030, 0.7924, -0.2366, -0.2387, -0.1417, 0.0868,
93 0.1389, -0.4629, 0.0986])
94
95 The inverse of this transform does not possess an analytical solution and is left unimplemented. However,
96 the inverse is cached when the forward operation is called during sampling, and so samples drawn using
97 the planar transform can be scored.
98
99 :param input_dim: the dimension of the input (and output) variable.
100 :type input_dim: int
101
102 References:
103
104 Variational Inference with Normalizing Flows [arXiv:1505.05770]
105 Danilo Jimenez Rezende, Shakir Mohamed
106
107 """
108
109 domain = constraints.real
110 codomain = constraints.real
111 bijective = True
112 event_dim = 1
113
114 def __init__(self, input_dim):
115 super(Planar, self).__init__()
116
117 self.bias = nn.Parameter(torch.Tensor(1,))
118 self.u = nn.Parameter(torch.Tensor(input_dim,))
119 self.w = nn.Parameter(torch.Tensor(input_dim,))
120 self.input_dim = input_dim
121 self.reset_parameters()
122
123 def reset_parameters(self):
124 stdv = 1. / math.sqrt(self.u.size(0))
125 self.w.data.uniform_(-stdv, stdv)
126 self.u.data.uniform_(-stdv, stdv)
127 self.bias.data.zero_()
128
129
130 @copy_docs_from(ConditionalTransformModule)
131 class ConditionalPlanar(ConditionalTransformModule):
132 """
133 A conditional 'planar' bijective transform using the equation,
134
135 :math:`\\mathbf{y} = \\mathbf{x} + \\mathbf{u}\\tanh(\\mathbf{w}^T\\mathbf{z}+b)`
136
137 where :math:`\\mathbf{x}` are the inputs with dimension :math:`D`, :math:`\\mathbf{y}` are the outputs,
138 and the pseudo-parameters :math:`b\\in\\mathbb{R}`, :math:`\\mathbf{u}\\in\\mathbb{R}^D`, and
139 :math:`\\mathbf{w}\\in\\mathbb{R}^D` are the output of a function, e.g. a NN, with input
140 :math:`z\\in\\mathbb{R}^{M}` representing the context variable to condition on. For this to be an
141 invertible transformation, the condition :math:`\\mathbf{w}^T\\mathbf{u}>-1` is enforced.
142
143 Together with :class:`~pyro.distributions.ConditionalTransformedDistribution` this provides a way to create
144 richer variational approximations.
145
146 Example usage:
147
148 >>> from pyro.nn.dense_nn import DenseNN
149 >>> input_dim = 10
150 >>> context_dim = 5
151 >>> batch_size = 3
152 >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))
153 >>> hypernet = DenseNN(context_dim, [50, 50], param_dims=[1, input_dim, input_dim])
154 >>> transform = ConditionalPlanar(hypernet)
155 >>> z = torch.rand(batch_size, context_dim)
156 >>> flow_dist = dist.ConditionalTransformedDistribution(base_dist, [transform]).condition(z)
157 >>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP
158
159 The inverse of this transform does not possess an analytical solution and is left unimplemented. However,
160 the inverse is cached when the forward operation is called during sampling, and so samples drawn using
161 the planar transform can be scored.
162
163 :param nn: a function inputting the context variable and outputting a triplet of real-valued parameters
164 of dimensions :math:`(1, D, D)`.
165 :type nn: callable
166
167 References:
168 Variational Inference with Normalizing Flows [arXiv:1505.05770]
169 Danilo Jimenez Rezende, Shakir Mohamed
170
171 """
172
173 domain = constraints.real
174 codomain = constraints.real
175 bijective = True
176 event_dim = 1
177
178 def __init__(self, nn):
179 super(ConditionalPlanar, self).__init__()
180 self.nn = nn
181
182 def condition(self, context):
183 bias, u, w = self.nn(context)
184 return ConditionedPlanar(bias, u, w)
185
186
187 def planar(input_dim):
188 """
189 A helper function to create a :class:`~pyro.distributions.transforms.Planar` object for consistency with other
190 helpers.
191
192 :param input_dim: Dimension of input variable
193 :type input_dim: int
194
195 """
196
197 return Planar(input_dim)
198
199
200 def conditional_planar(input_dim, context_dim, hidden_dims=None):
201 """
202 A helper function to create a :class:`~pyro.distributions.transforms.ConditionalPlanar` object that takes care of
203 constructing a dense network with the correct input/output dimensions.
204
205 :param input_dim: Dimension of input variable
206 :type input_dim: int
207 :param context_dim: Dimension of context variable
208 :type context_dim: int
209 :param hidden_dims: The desired hidden dimensions of the dense network. Defaults
210 to using [input_dim * 10, input_dim * 10]
211 :type hidden_dims: list[int]
212
213
214 """
215
216 if hidden_dims is None:
217 hidden_dims = [input_dim * 10, input_dim * 10]
218 hypernet = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])
219 return ConditionalPlanar(hypernet)
220
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyro/distributions/transforms/planar.py b/pyro/distributions/transforms/planar.py
--- a/pyro/distributions/transforms/planar.py
+++ b/pyro/distributions/transforms/planar.py
@@ -14,6 +14,11 @@
@copy_docs_from(Transform)
class ConditionedPlanar(Transform):
+ domain = constraints.real
+ codomain = constraints.real
+ bijective = True
+ event_dim = 1
+
def __init__(self, bias=None, u=None, w=None):
super(ConditionedPlanar, self).__init__(cache_size=1)
self.bias = bias
| {"golden_diff": "diff --git a/pyro/distributions/transforms/planar.py b/pyro/distributions/transforms/planar.py\n--- a/pyro/distributions/transforms/planar.py\n+++ b/pyro/distributions/transforms/planar.py\n@@ -14,6 +14,11 @@\n \n @copy_docs_from(Transform)\n class ConditionedPlanar(Transform):\n+ domain = constraints.real\n+ codomain = constraints.real\n+ bijective = True\n+ event_dim = 1\n+\n def __init__(self, bias=None, u=None, w=None):\n super(ConditionedPlanar, self).__init__(cache_size=1)\n self.bias = bias\n", "issue": "ConditionedPlanarFlow's event_dim is wrong [bug]\n\r\n--------------------------------------------------------------------------------------------------\r\n### Issue Description\r\nProvide a brief description of the issue.\r\n\r\nI think the ConditionedPlanarFlow's event_dim is incorrect. I think the class needs to be set as such:\r\n```\r\n\r\nclass ConditionedPlanarFlow(Transform):\r\n event_dim=1 #this line is not in the current release version\r\n def __init__(self, bias=None, u=None, w=None):\r\n super(ConditionedPlanarFlow, self).__init__(cache_size=1)\r\n self.bias = bias\r\n self.u = u\r\n self.w = w\r\n self._cached_logDetJ = None\r\n\r\n```\r\nThe Transform super class default is event_dim=0.\r\n\r\nMaybe I misunderstand this class, but I notice the PlanarFlow class has event_dim=1, as do many of the other normalizing flows and when I tried using this flow for my models my ELBO would go negative. I think the confusion is that ConditionalPlanarFlow has the event_dim=1, but that isn't setting it when it the actual flows are created (from what I can tell anyways).\r\n\r\n### Environment\r\nFor any bugs, please provide the following:\r\n - Found on: Ubuntu \r\n - PyTorch version: I think I was using 1.2 ...but again this is a super minor bug\r\nYou can see it in the the pyro docs http://docs.pyro.ai/en/stable/_modules/pyro/distributions/transforms/planar.html#ConditionalPlanarFlow\r\n\r\n - Pyro version: 3.6.10 (I think...pretty sure a single boolean value is pretty robust to version though)\r\nWas originally found in...nightly build (not sure on term... 
I built from sourceb asically)\r\n\r\n\n", "before_files": [{"content": "import math\n\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import constraints\nimport torch.nn.functional as F\n\nfrom torch.distributions import Transform\nfrom pyro.distributions.conditional import ConditionalTransformModule\nfrom pyro.distributions.torch_transform import TransformModule\nfrom pyro.distributions.util import copy_docs_from\nfrom pyro.nn import DenseNN\n\n\n@copy_docs_from(Transform)\nclass ConditionedPlanar(Transform):\n def __init__(self, bias=None, u=None, w=None):\n super(ConditionedPlanar, self).__init__(cache_size=1)\n self.bias = bias\n self.u = u\n self.w = w\n self._cached_logDetJ = None\n\n # This method ensures that torch(u_hat, w) > -1, required for invertibility\n def u_hat(self, u, w):\n alpha = torch.matmul(u.unsqueeze(-2), w.unsqueeze(-1)).squeeze(-1)\n a_prime = -1 + F.softplus(alpha)\n return u + (a_prime - alpha) * w.div(w.pow(2).sum(dim=-1, keepdim=True))\n\n def _call(self, x):\n \"\"\"\n :param x: the input into the bijection\n :type x: torch.Tensor\n Invokes the bijection x => y; in the prototypical context of a\n :class:`~pyro.distributions.TransformedDistribution` `x` is a sample from the base distribution (or the output\n of a previous transform)\n \"\"\"\n\n # x ~ (batch_size, dim_size, 1)\n # w ~ (batch_size, 1, dim_size)\n # bias ~ (batch_size, 1)\n act = torch.tanh(torch.matmul(self.w.unsqueeze(-2), x.unsqueeze(-1)).squeeze(-1) + self.bias)\n u_hat = self.u_hat(self.u, self.w)\n y = x + u_hat * act\n\n psi_z = (1. - act.pow(2)) * self.w\n self._cached_logDetJ = torch.log(\n torch.abs(1 + torch.matmul(psi_z.unsqueeze(-2), u_hat.unsqueeze(-1)).squeeze(-1).squeeze(-1)))\n\n return y\n\n def _inverse(self, y):\n \"\"\"\n :param y: the output of the bijection\n :type y: torch.Tensor\n Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values\n `y`; rather it assumes `y` is the result of a previously computed application of the bijector\n to some `x` (which was cached on the forward call)\n \"\"\"\n\n raise KeyError(\"ConditionalPlanar object expected to find key in intermediates cache but didn't\")\n\n def log_abs_det_jacobian(self, x, y):\n \"\"\"\n Calculates the elementwise determinant of the log Jacobian\n \"\"\"\n return self._cached_logDetJ\n\n\n@copy_docs_from(ConditionedPlanar)\nclass Planar(ConditionedPlanar, TransformModule):\n \"\"\"\n A 'planar' bijective transform with equation,\n\n :math:`\\\\mathbf{y} = \\\\mathbf{x} + \\\\mathbf{u}\\\\tanh(\\\\mathbf{w}^T\\\\mathbf{z}+b)`\n\n where :math:`\\\\mathbf{x}` are the inputs, :math:`\\\\mathbf{y}` are the outputs, and the learnable parameters\n are :math:`b\\\\in\\\\mathbb{R}`, :math:`\\\\mathbf{u}\\\\in\\\\mathbb{R}^D`, :math:`\\\\mathbf{w}\\\\in\\\\mathbb{R}^D` for input\n dimension :math:`D`. 
For this to be an invertible transformation, the condition\n :math:`\\\\mathbf{w}^T\\\\mathbf{u}>-1` is enforced.\n\n Together with :class:`~pyro.distributions.TransformedDistribution` this provides a way to create richer\n variational approximations.\n\n Example usage:\n\n >>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))\n >>> transform = Planar(10)\n >>> pyro.module(\"my_transform\", transform) # doctest: +SKIP\n >>> flow_dist = dist.TransformedDistribution(base_dist, [transform])\n >>> flow_dist.sample() # doctest: +SKIP\n tensor([-0.4071, -0.5030, 0.7924, -0.2366, -0.2387, -0.1417, 0.0868,\n 0.1389, -0.4629, 0.0986])\n\n The inverse of this transform does not possess an analytical solution and is left unimplemented. However,\n the inverse is cached when the forward operation is called during sampling, and so samples drawn using\n the planar transform can be scored.\n\n :param input_dim: the dimension of the input (and output) variable.\n :type input_dim: int\n\n References:\n\n Variational Inference with Normalizing Flows [arXiv:1505.05770]\n Danilo Jimenez Rezende, Shakir Mohamed\n\n \"\"\"\n\n domain = constraints.real\n codomain = constraints.real\n bijective = True\n event_dim = 1\n\n def __init__(self, input_dim):\n super(Planar, self).__init__()\n\n self.bias = nn.Parameter(torch.Tensor(1,))\n self.u = nn.Parameter(torch.Tensor(input_dim,))\n self.w = nn.Parameter(torch.Tensor(input_dim,))\n self.input_dim = input_dim\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.u.size(0))\n self.w.data.uniform_(-stdv, stdv)\n self.u.data.uniform_(-stdv, stdv)\n self.bias.data.zero_()\n\n\n@copy_docs_from(ConditionalTransformModule)\nclass ConditionalPlanar(ConditionalTransformModule):\n \"\"\"\n A conditional 'planar' bijective transform using the equation,\n\n :math:`\\\\mathbf{y} = \\\\mathbf{x} + \\\\mathbf{u}\\\\tanh(\\\\mathbf{w}^T\\\\mathbf{z}+b)`\n\n where :math:`\\\\mathbf{x}` are the inputs with dimension :math:`D`, :math:`\\\\mathbf{y}` are the outputs,\n and the pseudo-parameters :math:`b\\\\in\\\\mathbb{R}`, :math:`\\\\mathbf{u}\\\\in\\\\mathbb{R}^D`, and\n :math:`\\\\mathbf{w}\\\\in\\\\mathbb{R}^D` are the output of a function, e.g. a NN, with input\n :math:`z\\\\in\\\\mathbb{R}^{M}` representing the context variable to condition on. For this to be an\n invertible transformation, the condition :math:`\\\\mathbf{w}^T\\\\mathbf{u}>-1` is enforced.\n\n Together with :class:`~pyro.distributions.ConditionalTransformedDistribution` this provides a way to create\n richer variational approximations.\n\n Example usage:\n\n >>> from pyro.nn.dense_nn import DenseNN\n >>> input_dim = 10\n >>> context_dim = 5\n >>> batch_size = 3\n >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))\n >>> hypernet = DenseNN(context_dim, [50, 50], param_dims=[1, input_dim, input_dim])\n >>> transform = ConditionalPlanar(hypernet)\n >>> z = torch.rand(batch_size, context_dim)\n >>> flow_dist = dist.ConditionalTransformedDistribution(base_dist, [transform]).condition(z)\n >>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP\n\n The inverse of this transform does not possess an analytical solution and is left unimplemented. 
However,\n the inverse is cached when the forward operation is called during sampling, and so samples drawn using\n the planar transform can be scored.\n\n :param nn: a function inputting the context variable and outputting a triplet of real-valued parameters\n of dimensions :math:`(1, D, D)`.\n :type nn: callable\n\n References:\n Variational Inference with Normalizing Flows [arXiv:1505.05770]\n Danilo Jimenez Rezende, Shakir Mohamed\n\n \"\"\"\n\n domain = constraints.real\n codomain = constraints.real\n bijective = True\n event_dim = 1\n\n def __init__(self, nn):\n super(ConditionalPlanar, self).__init__()\n self.nn = nn\n\n def condition(self, context):\n bias, u, w = self.nn(context)\n return ConditionedPlanar(bias, u, w)\n\n\ndef planar(input_dim):\n \"\"\"\n A helper function to create a :class:`~pyro.distributions.transforms.Planar` object for consistency with other\n helpers.\n\n :param input_dim: Dimension of input variable\n :type input_dim: int\n\n \"\"\"\n\n return Planar(input_dim)\n\n\ndef conditional_planar(input_dim, context_dim, hidden_dims=None):\n \"\"\"\n A helper function to create a :class:`~pyro.distributions.transforms.ConditionalPlanar` object that takes care of\n constructing a dense network with the correct input/output dimensions.\n\n :param input_dim: Dimension of input variable\n :type input_dim: int\n :param context_dim: Dimension of context variable\n :type context_dim: int\n :param hidden_dims: The desired hidden dimensions of the dense network. Defaults\n to using [input_dim * 10, input_dim * 10]\n :type hidden_dims: list[int]\n\n\n \"\"\"\n\n if hidden_dims is None:\n hidden_dims = [input_dim * 10, input_dim * 10]\n hypernet = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])\n return ConditionalPlanar(hypernet)\n", "path": "pyro/distributions/transforms/planar.py"}], "after_files": [{"content": "import math\n\nimport torch\nimport torch.nn as nn\nfrom torch.distributions import constraints\nimport torch.nn.functional as F\n\nfrom torch.distributions import Transform\nfrom pyro.distributions.conditional import ConditionalTransformModule\nfrom pyro.distributions.torch_transform import TransformModule\nfrom pyro.distributions.util import copy_docs_from\nfrom pyro.nn import DenseNN\n\n\n@copy_docs_from(Transform)\nclass ConditionedPlanar(Transform):\n domain = constraints.real\n codomain = constraints.real\n bijective = True\n event_dim = 1\n\n def __init__(self, bias=None, u=None, w=None):\n super(ConditionedPlanar, self).__init__(cache_size=1)\n self.bias = bias\n self.u = u\n self.w = w\n self._cached_logDetJ = None\n\n # This method ensures that torch(u_hat, w) > -1, required for invertibility\n def u_hat(self, u, w):\n alpha = torch.matmul(u.unsqueeze(-2), w.unsqueeze(-1)).squeeze(-1)\n a_prime = -1 + F.softplus(alpha)\n return u + (a_prime - alpha) * w.div(w.pow(2).sum(dim=-1, keepdim=True))\n\n def _call(self, x):\n \"\"\"\n :param x: the input into the bijection\n :type x: torch.Tensor\n Invokes the bijection x => y; in the prototypical context of a\n :class:`~pyro.distributions.TransformedDistribution` `x` is a sample from the base distribution (or the output\n of a previous transform)\n \"\"\"\n\n # x ~ (batch_size, dim_size, 1)\n # w ~ (batch_size, 1, dim_size)\n # bias ~ (batch_size, 1)\n act = torch.tanh(torch.matmul(self.w.unsqueeze(-2), x.unsqueeze(-1)).squeeze(-1) + self.bias)\n u_hat = self.u_hat(self.u, self.w)\n y = x + u_hat * act\n\n psi_z = (1. 
- act.pow(2)) * self.w\n self._cached_logDetJ = torch.log(\n torch.abs(1 + torch.matmul(psi_z.unsqueeze(-2), u_hat.unsqueeze(-1)).squeeze(-1).squeeze(-1)))\n\n return y\n\n def _inverse(self, y):\n \"\"\"\n :param y: the output of the bijection\n :type y: torch.Tensor\n Inverts y => x. As noted above, this implementation is incapable of inverting arbitrary values\n `y`; rather it assumes `y` is the result of a previously computed application of the bijector\n to some `x` (which was cached on the forward call)\n \"\"\"\n\n raise KeyError(\"ConditionalPlanar object expected to find key in intermediates cache but didn't\")\n\n def log_abs_det_jacobian(self, x, y):\n \"\"\"\n Calculates the elementwise determinant of the log Jacobian\n \"\"\"\n return self._cached_logDetJ\n\n\n@copy_docs_from(ConditionedPlanar)\nclass Planar(ConditionedPlanar, TransformModule):\n \"\"\"\n A 'planar' bijective transform with equation,\n\n :math:`\\\\mathbf{y} = \\\\mathbf{x} + \\\\mathbf{u}\\\\tanh(\\\\mathbf{w}^T\\\\mathbf{z}+b)`\n\n where :math:`\\\\mathbf{x}` are the inputs, :math:`\\\\mathbf{y}` are the outputs, and the learnable parameters\n are :math:`b\\\\in\\\\mathbb{R}`, :math:`\\\\mathbf{u}\\\\in\\\\mathbb{R}^D`, :math:`\\\\mathbf{w}\\\\in\\\\mathbb{R}^D` for input\n dimension :math:`D`. For this to be an invertible transformation, the condition\n :math:`\\\\mathbf{w}^T\\\\mathbf{u}>-1` is enforced.\n\n Together with :class:`~pyro.distributions.TransformedDistribution` this provides a way to create richer\n variational approximations.\n\n Example usage:\n\n >>> base_dist = dist.Normal(torch.zeros(10), torch.ones(10))\n >>> transform = Planar(10)\n >>> pyro.module(\"my_transform\", transform) # doctest: +SKIP\n >>> flow_dist = dist.TransformedDistribution(base_dist, [transform])\n >>> flow_dist.sample() # doctest: +SKIP\n tensor([-0.4071, -0.5030, 0.7924, -0.2366, -0.2387, -0.1417, 0.0868,\n 0.1389, -0.4629, 0.0986])\n\n The inverse of this transform does not possess an analytical solution and is left unimplemented. However,\n the inverse is cached when the forward operation is called during sampling, and so samples drawn using\n the planar transform can be scored.\n\n :param input_dim: the dimension of the input (and output) variable.\n :type input_dim: int\n\n References:\n\n Variational Inference with Normalizing Flows [arXiv:1505.05770]\n Danilo Jimenez Rezende, Shakir Mohamed\n\n \"\"\"\n\n domain = constraints.real\n codomain = constraints.real\n bijective = True\n event_dim = 1\n\n def __init__(self, input_dim):\n super(Planar, self).__init__()\n\n self.bias = nn.Parameter(torch.Tensor(1,))\n self.u = nn.Parameter(torch.Tensor(input_dim,))\n self.w = nn.Parameter(torch.Tensor(input_dim,))\n self.input_dim = input_dim\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. / math.sqrt(self.u.size(0))\n self.w.data.uniform_(-stdv, stdv)\n self.u.data.uniform_(-stdv, stdv)\n self.bias.data.zero_()\n\n\n@copy_docs_from(ConditionalTransformModule)\nclass ConditionalPlanar(ConditionalTransformModule):\n \"\"\"\n A conditional 'planar' bijective transform using the equation,\n\n :math:`\\\\mathbf{y} = \\\\mathbf{x} + \\\\mathbf{u}\\\\tanh(\\\\mathbf{w}^T\\\\mathbf{z}+b)`\n\n where :math:`\\\\mathbf{x}` are the inputs with dimension :math:`D`, :math:`\\\\mathbf{y}` are the outputs,\n and the pseudo-parameters :math:`b\\\\in\\\\mathbb{R}`, :math:`\\\\mathbf{u}\\\\in\\\\mathbb{R}^D`, and\n :math:`\\\\mathbf{w}\\\\in\\\\mathbb{R}^D` are the output of a function, e.g. 
a NN, with input\n :math:`z\\\\in\\\\mathbb{R}^{M}` representing the context variable to condition on. For this to be an\n invertible transformation, the condition :math:`\\\\mathbf{w}^T\\\\mathbf{u}>-1` is enforced.\n\n Together with :class:`~pyro.distributions.ConditionalTransformedDistribution` this provides a way to create\n richer variational approximations.\n\n Example usage:\n\n >>> from pyro.nn.dense_nn import DenseNN\n >>> input_dim = 10\n >>> context_dim = 5\n >>> batch_size = 3\n >>> base_dist = dist.Normal(torch.zeros(input_dim), torch.ones(input_dim))\n >>> hypernet = DenseNN(context_dim, [50, 50], param_dims=[1, input_dim, input_dim])\n >>> transform = ConditionalPlanar(hypernet)\n >>> z = torch.rand(batch_size, context_dim)\n >>> flow_dist = dist.ConditionalTransformedDistribution(base_dist, [transform]).condition(z)\n >>> flow_dist.sample(sample_shape=torch.Size([batch_size])) # doctest: +SKIP\n\n The inverse of this transform does not possess an analytical solution and is left unimplemented. However,\n the inverse is cached when the forward operation is called during sampling, and so samples drawn using\n the planar transform can be scored.\n\n :param nn: a function inputting the context variable and outputting a triplet of real-valued parameters\n of dimensions :math:`(1, D, D)`.\n :type nn: callable\n\n References:\n Variational Inference with Normalizing Flows [arXiv:1505.05770]\n Danilo Jimenez Rezende, Shakir Mohamed\n\n \"\"\"\n\n domain = constraints.real\n codomain = constraints.real\n bijective = True\n event_dim = 1\n\n def __init__(self, nn):\n super(ConditionalPlanar, self).__init__()\n self.nn = nn\n\n def condition(self, context):\n bias, u, w = self.nn(context)\n return ConditionedPlanar(bias, u, w)\n\n\ndef planar(input_dim):\n \"\"\"\n A helper function to create a :class:`~pyro.distributions.transforms.Planar` object for consistency with other\n helpers.\n\n :param input_dim: Dimension of input variable\n :type input_dim: int\n\n \"\"\"\n\n return Planar(input_dim)\n\n\ndef conditional_planar(input_dim, context_dim, hidden_dims=None):\n \"\"\"\n A helper function to create a :class:`~pyro.distributions.transforms.ConditionalPlanar` object that takes care of\n constructing a dense network with the correct input/output dimensions.\n\n :param input_dim: Dimension of input variable\n :type input_dim: int\n :param context_dim: Dimension of context variable\n :type context_dim: int\n :param hidden_dims: The desired hidden dimensions of the dense network. Defaults\n to using [input_dim * 10, input_dim * 10]\n :type hidden_dims: list[int]\n\n\n \"\"\"\n\n if hidden_dims is None:\n hidden_dims = [input_dim * 10, input_dim * 10]\n hypernet = DenseNN(context_dim, hidden_dims, param_dims=[1, input_dim, input_dim])\n return ConditionalPlanar(hypernet)\n", "path": "pyro/distributions/transforms/planar.py"}]} | 3,437 | 149 |
gh_patches_debug_2000 | rasdani/github-patches | git_diff | automl__auto-sklearn-190 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add warning if dependencies are not met
There should be a warning if one of the following dependencies is not met:
- scikit-learn==0.17
- smac==0.0.1
- lockfile>=0.10
- ConfigSpace>=0.2.1
- pyrfr==0.2.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `autosklearn/util/dependencies.py`
Content:
```
1 from warnings import warn
2
3 import pkg_resources
4 import re
5
6 from distutils.version import LooseVersion
7
8
9 RE_PATTERN = re.compile('^(?P<name>[\w\-]+)((?P<operation>==|>=|>)(?P<version>(\d+\.)?(\d+\.)?(\d+)))?$')
10
11
12 def verify_packages(packages):
13 if not packages:
14 return
15 if isinstance(packages, str):
16 packages = packages.splitlines()
17
18 for package in packages:
19 if not package:
20 continue
21
22 match = RE_PATTERN.match(package)
23 if match:
24 name = match.group('name')
25 operation = match.group('operation')
26 version = match.group('version')
27 _verify_package(name, operation, version)
28 else:
29 raise ValueError('Unable to read requirement: %s' % package)
30
31
32 def _verify_package(name, operation, version):
33 try:
34 module = pkg_resources.get_distribution(name)
35 except pkg_resources.DistributionNotFound:
36 raise MissingPackageError(name) from None
37
38 if not operation:
39 return
40
41 required_version = LooseVersion(version)
42 installed_version = LooseVersion(module.version)
43
44 if operation == '==':
45 check = required_version == installed_version
46 elif operation == '>':
47 check = installed_version > required_version
48 elif operation == '>=':
49 check = installed_version > required_version or \
50 installed_version == required_version
51 else:
52 raise NotImplementedError('operation \'%s\' is not supported' % operation)
53 if not check:
54 raise IncorrectPackageVersionError(name, installed_version, operation, required_version)
55
56
57 class MissingPackageError(Exception):
58
59 error_message = 'mandatory package \'{name}\' not found'
60
61 def __init__(self, package_name):
62 self.package_name = package_name
63 super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))
64
65
66 class IncorrectPackageVersionError(Exception):
67
68 error_message = '\'{name} {installed_version}\' version mismatch ({operation}{required_version})'
69
70 def __init__(self, package_name, installed_version, operation, required_version):
71 self.package_name = package_name
72 self.installed_version = installed_version
73 self.operation = operation
74 self.required_version = required_version
75 message = self.error_message.format(name=package_name,
76 installed_version=installed_version,
77 operation=operation,
78 required_version=required_version)
79 super(IncorrectPackageVersionError, self).__init__(message)
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/autosklearn/util/dependencies.py b/autosklearn/util/dependencies.py
--- a/autosklearn/util/dependencies.py
+++ b/autosklearn/util/dependencies.py
@@ -33,7 +33,7 @@
try:
module = pkg_resources.get_distribution(name)
except pkg_resources.DistributionNotFound:
- raise MissingPackageError(name) from None
+ raise MissingPackageError(name)
if not operation:
return
| {"golden_diff": "diff --git a/autosklearn/util/dependencies.py b/autosklearn/util/dependencies.py\n--- a/autosklearn/util/dependencies.py\n+++ b/autosklearn/util/dependencies.py\n@@ -33,7 +33,7 @@\n try:\n module = pkg_resources.get_distribution(name)\n except pkg_resources.DistributionNotFound:\n- raise MissingPackageError(name) from None\n+ raise MissingPackageError(name)\n \n if not operation:\n return\n", "issue": "Add warning if dependencies are not met\nThere should be a warning if one of the following dependencies is not met:\r\n- scikit-learn==0.17\r\n- smac==0.0.1\r\n- lockfile>=0.10\r\n- ConfigSpace>=0.2.1\r\n- pyrfr==0.2.1\r\n\n", "before_files": [{"content": "from warnings import warn\n\nimport pkg_resources\nimport re\n\nfrom distutils.version import LooseVersion\n\n\nRE_PATTERN = re.compile('^(?P<name>[\\w\\-]+)((?P<operation>==|>=|>)(?P<version>(\\d+\\.)?(\\d+\\.)?(\\d+)))?$')\n\n\ndef verify_packages(packages):\n if not packages:\n return\n if isinstance(packages, str):\n packages = packages.splitlines()\n\n for package in packages:\n if not package:\n continue\n\n match = RE_PATTERN.match(package)\n if match:\n name = match.group('name')\n operation = match.group('operation')\n version = match.group('version')\n _verify_package(name, operation, version)\n else:\n raise ValueError('Unable to read requirement: %s' % package)\n\n\ndef _verify_package(name, operation, version):\n try:\n module = pkg_resources.get_distribution(name)\n except pkg_resources.DistributionNotFound:\n raise MissingPackageError(name) from None\n\n if not operation:\n return\n\n required_version = LooseVersion(version)\n installed_version = LooseVersion(module.version)\n\n if operation == '==':\n check = required_version == installed_version\n elif operation == '>':\n check = installed_version > required_version\n elif operation == '>=':\n check = installed_version > required_version or \\\n installed_version == required_version\n else:\n raise NotImplementedError('operation \\'%s\\' is not supported' % operation)\n if not check:\n raise IncorrectPackageVersionError(name, installed_version, operation, required_version)\n\n\nclass MissingPackageError(Exception):\n\n error_message = 'mandatory package \\'{name}\\' not found'\n\n def __init__(self, package_name):\n self.package_name = package_name\n super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))\n\n\nclass IncorrectPackageVersionError(Exception):\n\n error_message = '\\'{name} {installed_version}\\' version mismatch ({operation}{required_version})'\n\n def __init__(self, package_name, installed_version, operation, required_version):\n self.package_name = package_name\n self.installed_version = installed_version\n self.operation = operation\n self.required_version = required_version\n message = self.error_message.format(name=package_name,\n installed_version=installed_version,\n operation=operation,\n required_version=required_version)\n super(IncorrectPackageVersionError, self).__init__(message)\n", "path": "autosklearn/util/dependencies.py"}], "after_files": [{"content": "from warnings import warn\n\nimport pkg_resources\nimport re\n\nfrom distutils.version import LooseVersion\n\n\nRE_PATTERN = re.compile('^(?P<name>[\\w\\-]+)((?P<operation>==|>=|>)(?P<version>(\\d+\\.)?(\\d+\\.)?(\\d+)))?$')\n\n\ndef verify_packages(packages):\n if not packages:\n return\n if isinstance(packages, str):\n packages = packages.splitlines()\n\n for package in packages:\n if not package:\n continue\n\n match = 
RE_PATTERN.match(package)\n if match:\n name = match.group('name')\n operation = match.group('operation')\n version = match.group('version')\n _verify_package(name, operation, version)\n else:\n raise ValueError('Unable to read requirement: %s' % package)\n\n\ndef _verify_package(name, operation, version):\n try:\n module = pkg_resources.get_distribution(name)\n except pkg_resources.DistributionNotFound:\n raise MissingPackageError(name)\n\n if not operation:\n return\n\n required_version = LooseVersion(version)\n installed_version = LooseVersion(module.version)\n\n if operation == '==':\n check = required_version == installed_version\n elif operation == '>':\n check = installed_version > required_version\n elif operation == '>=':\n check = installed_version > required_version or \\\n installed_version == required_version\n else:\n raise NotImplementedError('operation \\'%s\\' is not supported' % operation)\n if not check:\n raise IncorrectPackageVersionError(name, installed_version, operation, required_version)\n\n\nclass MissingPackageError(Exception):\n\n error_message = 'mandatory package \\'{name}\\' not found'\n\n def __init__(self, package_name):\n self.package_name = package_name\n super(MissingPackageError, self).__init__(self.error_message.format(name=package_name))\n\n\nclass IncorrectPackageVersionError(Exception):\n\n error_message = '\\'{name} {installed_version}\\' version mismatch ({operation}{required_version})'\n\n def __init__(self, package_name, installed_version, operation, required_version):\n self.package_name = package_name\n self.installed_version = installed_version\n self.operation = operation\n self.required_version = required_version\n message = self.error_message.format(name=package_name,\n installed_version=installed_version,\n operation=operation,\n required_version=required_version)\n super(IncorrectPackageVersionError, self).__init__(message)\n", "path": "autosklearn/util/dependencies.py"}]} | 1,026 | 104 |
gh_patches_debug_9533 | rasdani/github-patches | git_diff | quantumlib__Cirq-5478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
WaitGate on multiple qubits gets confused about num_qubits when resolved
**Description of the issue**
`WaitGate.num_qubits()` appears to mutate when going through parameter resolution.
**How to reproduce the issue**
```python
g = cirq.wait(cirq.LineQubit(1), cirq.LineQubit(2))
cirq.resolve_parameters(g, {'a': 1})
```
<details>
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/protocols/resolve_parameters.py:184, in resolve_parameters(val, param_resolver, recursive)
182 result = NotImplemented
183 else:
--> 184 result = getter(param_resolver, recursive)
186 if result is not NotImplemented:
187 return result
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:278, in GateOperation._resolve_parameters_(self, resolver, recursive)
274 def _resolve_parameters_(
275 self, resolver: 'cirq.ParamResolver', recursive: bool
276 ) -> 'cirq.Operation':
277 resolved_gate = protocols.resolve_parameters(self.gate, resolver, recursive)
--> 278 return self.with_gate(resolved_gate)
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:82, in GateOperation.with_gate(self, new_gate)
79 if self.gate is new_gate:
80 # As GateOperation is immutable, this can return the original.
81 return self
---> 82 return new_gate.on(*self.qubits)
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:219, in Gate.on(self, *qubits)
213 def on(self, *qubits: Qid) -> 'Operation':
214 """Returns an application of this gate to the given qubits.
215
216 Args:
217 *qubits: The collection of qubits to potentially apply the gate to.
218 """
--> 219 return ops.gate_operation.GateOperation(self, list(qubits))
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:61, in GateOperation.__init__(self, gate, qubits)
54 def __init__(self, gate: 'cirq.Gate', qubits: Sequence['cirq.Qid']) -> None:
55 """Inits GateOperation.
56
57 Args:
58 gate: The gate to apply.
59 qubits: The qubits to operate on.
60 """
---> 61 gate.validate_args(qubits)
62 self._gate = gate
63 self._qubits = tuple(qubits)
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:211, in Gate.validate_args(self, qubits)
194 def validate_args(self, qubits: Sequence['cirq.Qid']) -> None:
195 """Checks if this gate can be applied to the given qubits.
196
197 By default checks that:
(...)
209 ValueError: The gate can't be applied to the qubits.
210 """
--> 211 _validate_qid_shape(self, qubits)
File ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:929, in _validate_qid_shape(val, qubits)
927 qid_shape = protocols.qid_shape(val)
928 if len(qubits) != len(qid_shape):
--> 929 raise ValueError(
930 'Wrong number of qubits for <{!r}>. '
931 'Expected {} qubits but got <{!r}>.'.format(val, len(qid_shape), qubits)
932 )
933 if any(qid.dimension != dimension for qid, dimension in zip(qubits, qid_shape)):
934 raise ValueError(
935 'Wrong shape of qids for <{!r}>. '
936 'Expected {} but got {} <{!r}>.'.format(
937 val, qid_shape, tuple(qid.dimension for qid in qubits), qubits
938 )
939 )
ValueError: Wrong number of qubits for <cirq.WaitGate(cirq.Duration(millis=0))>. Expected 1 qubits but got <[cirq.LineQubit(1), cirq.LineQubit(2)]>.
</details>
**Cirq version**
'0.15.0.dev20220503224557'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/ops/wait_gate.py`
Content:
```
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import AbstractSet, Any, Dict, Optional, Tuple, TYPE_CHECKING, Union
15
16 import sympy
17
18 from cirq import value, protocols
19 from cirq.ops import raw_types
20
21 if TYPE_CHECKING:
22 import cirq
23
24
25 @value.value_equality
26 class WaitGate(raw_types.Gate):
27 r"""An idle gate that represents waiting.
28
29 In non-noisy simulators, this gate is just an identity gate. But noisy
30 simulators and noise models may insert more error for longer waits.
31 """
32
33 def __init__(
34 self,
35 duration: 'cirq.DURATION_LIKE',
36 num_qubits: Optional[int] = None,
37 qid_shape: Tuple[int, ...] = None,
38 ) -> None:
39 """Initialize a wait gate with the given duration.
40
41 Args:
42 duration: A constant or parameterized wait duration. This can be
43 an instance of `datetime.timedelta` or `cirq.Duration`.
44 num_qubits: The number of qubits the gate operates on. If None and `qid_shape` is None,
45 this defaults to one qubit.
46 qid_shape: Can be specified instead of `num_qubits` for the case that the gate should
47 act on qudits.
48
49 Raises:
50 ValueError: If the `qid_shape` provided is empty or `num_qubits` contradicts
51 `qid_shape`.
52 """
53 self._duration = value.Duration(duration)
54 if not protocols.is_parameterized(self.duration) and self.duration < 0:
55 raise ValueError('duration < 0')
56 if qid_shape is None:
57 if num_qubits is None:
58 # Assume one qubit for backwards compatibility
59 qid_shape = (2,)
60 else:
61 qid_shape = (2,) * num_qubits
62 if num_qubits is None:
63 num_qubits = len(qid_shape)
64 if not qid_shape:
65 raise ValueError('Waiting on an empty set of qubits.')
66 if num_qubits != len(qid_shape):
67 raise ValueError('len(qid_shape) != num_qubits')
68 self._qid_shape = qid_shape
69
70 @property
71 def duration(self) -> 'cirq.Duration':
72 return self._duration
73
74 def _is_parameterized_(self) -> bool:
75 return protocols.is_parameterized(self.duration)
76
77 def _parameter_names_(self) -> AbstractSet[str]:
78 return protocols.parameter_names(self.duration)
79
80 def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':
81 return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))
82
83 def _qid_shape_(self) -> Tuple[int, ...]:
84 return self._qid_shape
85
86 def _has_unitary_(self) -> bool:
87 return True
88
89 def _apply_unitary_(self, args):
90 return args.target_tensor # Identity.
91
92 def _decompose_(self, qubits):
93 return []
94
95 def _trace_distance_bound_(self):
96 return 0
97
98 def __pow__(self, power):
99 if power == 1 or power == -1:
100 # The inverse of a wait is still a wait.
101 return self
102 # Other scalar exponents could scale the wait... but ultimately it is
103 # ambiguous whether the user wanted to scale the duration or just wanted
104 # to affect the unitary. Play it safe and fail.
105 return NotImplemented
106
107 def __str__(self) -> str:
108 return f'WaitGate({self.duration})'
109
110 def __repr__(self) -> str:
111 return f'cirq.WaitGate({repr(self.duration)})'
112
113 def _json_dict_(self) -> Dict[str, Any]:
114 d = protocols.obj_to_dict_helper(self, ['duration'])
115 if len(self._qid_shape) != 1:
116 d['num_qubits'] = len(self._qid_shape)
117 if any(d != 2 for d in self._qid_shape):
118 d['qid_shape'] = self._qid_shape
119 return d
120
121 @classmethod
122 def _from_json_dict_(cls, duration, num_qubits=None, qid_shape=None, **kwargs):
123 return cls(
124 duration=duration,
125 num_qubits=num_qubits,
126 qid_shape=None if qid_shape is None else tuple(qid_shape),
127 )
128
129 def _value_equality_values_(self) -> Any:
130 return self.duration
131
132 def _quil_(self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'):
133 return 'WAIT\n'
134
135
136 def wait(
137 *target: 'cirq.Qid',
138 duration: 'cirq.DURATION_LIKE' = None,
139 picos: Union[int, float, sympy.Expr] = 0,
140 nanos: Union[int, float, sympy.Expr] = 0,
141 micros: Union[int, float, sympy.Expr] = 0,
142 millis: Union[int, float, sympy.Expr] = 0,
143 ) -> raw_types.Operation:
144 """Creates a WaitGate applied to all the given qubits.
145
146 The duration can be specified as a DURATION_LIKE or using keyword args with
147 numbers in the appropriate units. See Duration for details.
148
149 Args:
150 *target: The qubits that should wait.
151 duration: Wait duration (see Duration).
152 picos: Picoseconds to wait (see Duration).
153 nanos: Nanoseconds to wait (see Duration).
154 micros: Microseconds to wait (see Duration).
155 millis: Milliseconds to wait (see Duration).
156 """
157 return WaitGate(
158 duration=value.Duration(duration, picos=picos, nanos=nanos, micros=micros, millis=millis),
159 qid_shape=protocols.qid_shape(target),
160 ).on(*target)
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-core/cirq/ops/wait_gate.py b/cirq-core/cirq/ops/wait_gate.py
--- a/cirq-core/cirq/ops/wait_gate.py
+++ b/cirq-core/cirq/ops/wait_gate.py
@@ -78,7 +78,10 @@
return protocols.parameter_names(self.duration)
def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':
- return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))
+ return WaitGate(
+ protocols.resolve_parameters(self.duration, resolver, recursive),
+ qid_shape=self._qid_shape,
+ )
def _qid_shape_(self) -> Tuple[int, ...]:
return self._qid_shape
| {"golden_diff": "diff --git a/cirq-core/cirq/ops/wait_gate.py b/cirq-core/cirq/ops/wait_gate.py\n--- a/cirq-core/cirq/ops/wait_gate.py\n+++ b/cirq-core/cirq/ops/wait_gate.py\n@@ -78,7 +78,10 @@\n return protocols.parameter_names(self.duration)\n \n def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':\n- return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))\n+ return WaitGate(\n+ protocols.resolve_parameters(self.duration, resolver, recursive),\n+ qid_shape=self._qid_shape,\n+ )\n \n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n", "issue": "WaitGate on multiple qubits gets confused about num_qubits when resolved\n**Description of the issue**\r\n`WaitGate.num_qubits()` appears to mutate when going through parameter resolution.\r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\ng = cirq.wait(cirq.LineQubit(1), cirq.LineQubit(2))\r\ncirq.resolve_parameters(g, {'a': 1})\r\n```\r\n\r\n<details>\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/protocols/resolve_parameters.py:184, in resolve_parameters(val, param_resolver, recursive)\r\n 182 result = NotImplemented\r\n 183 else:\r\n--> 184 result = getter(param_resolver, recursive)\r\n 186 if result is not NotImplemented:\r\n 187 return result\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:278, in GateOperation._resolve_parameters_(self, resolver, recursive)\r\n 274 def _resolve_parameters_(\r\n 275 self, resolver: 'cirq.ParamResolver', recursive: bool\r\n 276 ) -> 'cirq.Operation':\r\n 277 resolved_gate = protocols.resolve_parameters(self.gate, resolver, recursive)\r\n--> 278 return self.with_gate(resolved_gate)\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:82, in GateOperation.with_gate(self, new_gate)\r\n 79 if self.gate is new_gate:\r\n 80 # As GateOperation is immutable, this can return the original.\r\n 81 return self\r\n---> 82 return new_gate.on(*self.qubits)\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:219, in Gate.on(self, *qubits)\r\n 213 def on(self, *qubits: Qid) -> 'Operation':\r\n 214 \"\"\"Returns an application of this gate to the given qubits.\r\n 215 \r\n 216 Args:\r\n 217 *qubits: The collection of qubits to potentially apply the gate to.\r\n 218 \"\"\"\r\n--> 219 return ops.gate_operation.GateOperation(self, list(qubits))\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/gate_operation.py:61, in GateOperation.__init__(self, gate, qubits)\r\n 54 def __init__(self, gate: 'cirq.Gate', qubits: Sequence['cirq.Qid']) -> None:\r\n 55 \"\"\"Inits GateOperation.\r\n 56 \r\n 57 Args:\r\n 58 gate: The gate to apply.\r\n 59 qubits: The qubits to operate on.\r\n 60 \"\"\"\r\n---> 61 gate.validate_args(qubits)\r\n 62 self._gate = gate\r\n 63 self._qubits = tuple(qubits)\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:211, in Gate.validate_args(self, qubits)\r\n 194 def validate_args(self, qubits: Sequence['cirq.Qid']) -> None:\r\n 195 \"\"\"Checks if this gate can be applied to the given qubits.\r\n 196 \r\n 197 By default checks that:\r\n (...)\r\n 209 ValueError: The gate can't be applied to the qubits.\r\n 210 \"\"\"\r\n--> 211 _validate_qid_shape(self, qubits)\r\n\r\nFile ~/.virtualenvs/pyle/lib/python3.8/site-packages/cirq/ops/raw_types.py:929, in _validate_qid_shape(val, qubits)\r\n 927 qid_shape = protocols.qid_shape(val)\r\n 928 if len(qubits) != 
len(qid_shape):\r\n--> 929 raise ValueError(\r\n 930 'Wrong number of qubits for <{!r}>. '\r\n 931 'Expected {} qubits but got <{!r}>.'.format(val, len(qid_shape), qubits)\r\n 932 )\r\n 933 if any(qid.dimension != dimension for qid, dimension in zip(qubits, qid_shape)):\r\n 934 raise ValueError(\r\n 935 'Wrong shape of qids for <{!r}>. '\r\n 936 'Expected {} but got {} <{!r}>.'.format(\r\n 937 val, qid_shape, tuple(qid.dimension for qid in qubits), qubits\r\n 938 )\r\n 939 )\r\n\r\nValueError: Wrong number of qubits for <cirq.WaitGate(cirq.Duration(millis=0))>. Expected 1 qubits but got <[cirq.LineQubit(1), cirq.LineQubit(2)]>.\r\n</details>\r\n\r\n**Cirq version**\r\n'0.15.0.dev20220503224557'\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import AbstractSet, Any, Dict, Optional, Tuple, TYPE_CHECKING, Union\n\nimport sympy\n\nfrom cirq import value, protocols\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass WaitGate(raw_types.Gate):\n r\"\"\"An idle gate that represents waiting.\n\n In non-noisy simulators, this gate is just an identity gate. But noisy\n simulators and noise models may insert more error for longer waits.\n \"\"\"\n\n def __init__(\n self,\n duration: 'cirq.DURATION_LIKE',\n num_qubits: Optional[int] = None,\n qid_shape: Tuple[int, ...] = None,\n ) -> None:\n \"\"\"Initialize a wait gate with the given duration.\n\n Args:\n duration: A constant or parameterized wait duration. This can be\n an instance of `datetime.timedelta` or `cirq.Duration`.\n num_qubits: The number of qubits the gate operates on. 
If None and `qid_shape` is None,\n this defaults to one qubit.\n qid_shape: Can be specified instead of `num_qubits` for the case that the gate should\n act on qudits.\n\n Raises:\n ValueError: If the `qid_shape` provided is empty or `num_qubits` contradicts\n `qid_shape`.\n \"\"\"\n self._duration = value.Duration(duration)\n if not protocols.is_parameterized(self.duration) and self.duration < 0:\n raise ValueError('duration < 0')\n if qid_shape is None:\n if num_qubits is None:\n # Assume one qubit for backwards compatibility\n qid_shape = (2,)\n else:\n qid_shape = (2,) * num_qubits\n if num_qubits is None:\n num_qubits = len(qid_shape)\n if not qid_shape:\n raise ValueError('Waiting on an empty set of qubits.')\n if num_qubits != len(qid_shape):\n raise ValueError('len(qid_shape) != num_qubits')\n self._qid_shape = qid_shape\n\n @property\n def duration(self) -> 'cirq.Duration':\n return self._duration\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.duration)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self.duration)\n\n def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':\n return WaitGate(protocols.resolve_parameters(self.duration, resolver, recursive))\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _apply_unitary_(self, args):\n return args.target_tensor # Identity.\n\n def _decompose_(self, qubits):\n return []\n\n def _trace_distance_bound_(self):\n return 0\n\n def __pow__(self, power):\n if power == 1 or power == -1:\n # The inverse of a wait is still a wait.\n return self\n # Other scalar exponents could scale the wait... but ultimately it is\n # ambiguous whether the user wanted to scale the duration or just wanted\n # to affect the unitary. Play it safe and fail.\n return NotImplemented\n\n def __str__(self) -> str:\n return f'WaitGate({self.duration})'\n\n def __repr__(self) -> str:\n return f'cirq.WaitGate({repr(self.duration)})'\n\n def _json_dict_(self) -> Dict[str, Any]:\n d = protocols.obj_to_dict_helper(self, ['duration'])\n if len(self._qid_shape) != 1:\n d['num_qubits'] = len(self._qid_shape)\n if any(d != 2 for d in self._qid_shape):\n d['qid_shape'] = self._qid_shape\n return d\n\n @classmethod\n def _from_json_dict_(cls, duration, num_qubits=None, qid_shape=None, **kwargs):\n return cls(\n duration=duration,\n num_qubits=num_qubits,\n qid_shape=None if qid_shape is None else tuple(qid_shape),\n )\n\n def _value_equality_values_(self) -> Any:\n return self.duration\n\n def _quil_(self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'):\n return 'WAIT\\n'\n\n\ndef wait(\n *target: 'cirq.Qid',\n duration: 'cirq.DURATION_LIKE' = None,\n picos: Union[int, float, sympy.Expr] = 0,\n nanos: Union[int, float, sympy.Expr] = 0,\n micros: Union[int, float, sympy.Expr] = 0,\n millis: Union[int, float, sympy.Expr] = 0,\n) -> raw_types.Operation:\n \"\"\"Creates a WaitGate applied to all the given qubits.\n\n The duration can be specified as a DURATION_LIKE or using keyword args with\n numbers in the appropriate units. 
See Duration for details.\n\n Args:\n *target: The qubits that should wait.\n duration: Wait duration (see Duration).\n picos: Picoseconds to wait (see Duration).\n nanos: Nanoseconds to wait (see Duration).\n micros: Microseconds to wait (see Duration).\n millis: Milliseconds to wait (see Duration).\n \"\"\"\n return WaitGate(\n duration=value.Duration(duration, picos=picos, nanos=nanos, micros=micros, millis=millis),\n qid_shape=protocols.qid_shape(target),\n ).on(*target)\n", "path": "cirq-core/cirq/ops/wait_gate.py"}], "after_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import AbstractSet, Any, Dict, Optional, Tuple, TYPE_CHECKING, Union\n\nimport sympy\n\nfrom cirq import value, protocols\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\n\[email protected]_equality\nclass WaitGate(raw_types.Gate):\n r\"\"\"An idle gate that represents waiting.\n\n In non-noisy simulators, this gate is just an identity gate. But noisy\n simulators and noise models may insert more error for longer waits.\n \"\"\"\n\n def __init__(\n self,\n duration: 'cirq.DURATION_LIKE',\n num_qubits: Optional[int] = None,\n qid_shape: Tuple[int, ...] = None,\n ) -> None:\n \"\"\"Initialize a wait gate with the given duration.\n\n Args:\n duration: A constant or parameterized wait duration. This can be\n an instance of `datetime.timedelta` or `cirq.Duration`.\n num_qubits: The number of qubits the gate operates on. 
If None and `qid_shape` is None,\n this defaults to one qubit.\n qid_shape: Can be specified instead of `num_qubits` for the case that the gate should\n act on qudits.\n\n Raises:\n ValueError: If the `qid_shape` provided is empty or `num_qubits` contradicts\n `qid_shape`.\n \"\"\"\n self._duration = value.Duration(duration)\n if not protocols.is_parameterized(self.duration) and self.duration < 0:\n raise ValueError('duration < 0')\n if qid_shape is None:\n if num_qubits is None:\n # Assume one qubit for backwards compatibility\n qid_shape = (2,)\n else:\n qid_shape = (2,) * num_qubits\n if num_qubits is None:\n num_qubits = len(qid_shape)\n if not qid_shape:\n raise ValueError('Waiting on an empty set of qubits.')\n if num_qubits != len(qid_shape):\n raise ValueError('len(qid_shape) != num_qubits')\n self._qid_shape = qid_shape\n\n @property\n def duration(self) -> 'cirq.Duration':\n return self._duration\n\n def _is_parameterized_(self) -> bool:\n return protocols.is_parameterized(self.duration)\n\n def _parameter_names_(self) -> AbstractSet[str]:\n return protocols.parameter_names(self.duration)\n\n def _resolve_parameters_(self, resolver: 'cirq.ParamResolver', recursive: bool) -> 'WaitGate':\n return WaitGate(\n protocols.resolve_parameters(self.duration, resolver, recursive),\n qid_shape=self._qid_shape,\n )\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _apply_unitary_(self, args):\n return args.target_tensor # Identity.\n\n def _decompose_(self, qubits):\n return []\n\n def _trace_distance_bound_(self):\n return 0\n\n def __pow__(self, power):\n if power == 1 or power == -1:\n # The inverse of a wait is still a wait.\n return self\n # Other scalar exponents could scale the wait... but ultimately it is\n # ambiguous whether the user wanted to scale the duration or just wanted\n # to affect the unitary. Play it safe and fail.\n return NotImplemented\n\n def __str__(self) -> str:\n return f'WaitGate({self.duration})'\n\n def __repr__(self) -> str:\n return f'cirq.WaitGate({repr(self.duration)})'\n\n def _json_dict_(self) -> Dict[str, Any]:\n d = protocols.obj_to_dict_helper(self, ['duration'])\n if len(self._qid_shape) != 1:\n d['num_qubits'] = len(self._qid_shape)\n if any(d != 2 for d in self._qid_shape):\n d['qid_shape'] = self._qid_shape\n return d\n\n @classmethod\n def _from_json_dict_(cls, duration, num_qubits=None, qid_shape=None, **kwargs):\n return cls(\n duration=duration,\n num_qubits=num_qubits,\n qid_shape=None if qid_shape is None else tuple(qid_shape),\n )\n\n def _value_equality_values_(self) -> Any:\n return self.duration\n\n def _quil_(self, qubits: Tuple['cirq.Qid', ...], formatter: 'cirq.QuilFormatter'):\n return 'WAIT\\n'\n\n\ndef wait(\n *target: 'cirq.Qid',\n duration: 'cirq.DURATION_LIKE' = None,\n picos: Union[int, float, sympy.Expr] = 0,\n nanos: Union[int, float, sympy.Expr] = 0,\n micros: Union[int, float, sympy.Expr] = 0,\n millis: Union[int, float, sympy.Expr] = 0,\n) -> raw_types.Operation:\n \"\"\"Creates a WaitGate applied to all the given qubits.\n\n The duration can be specified as a DURATION_LIKE or using keyword args with\n numbers in the appropriate units. 
See Duration for details.\n\n Args:\n *target: The qubits that should wait.\n duration: Wait duration (see Duration).\n picos: Picoseconds to wait (see Duration).\n nanos: Nanoseconds to wait (see Duration).\n micros: Microseconds to wait (see Duration).\n millis: Milliseconds to wait (see Duration).\n \"\"\"\n return WaitGate(\n duration=value.Duration(duration, picos=picos, nanos=nanos, micros=micros, millis=millis),\n qid_shape=protocols.qid_shape(target),\n ).on(*target)\n", "path": "cirq-core/cirq/ops/wait_gate.py"}]} | 3,189 | 168 |
gh_patches_debug_7156 | rasdani/github-patches | git_diff | litestar-org__litestar-2673 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: OpenAPI schema generation fails with pydantic < 2.0.0
### Description
When pydantic 1.* is installed, interactive API docs cannot be loaded.
It seems the problem is that a helper function is trying to access a `pydantic_v2` attribute without checking if it was imported correctly, the stack trace below provides more info.
### URL to code causing the issue
_No response_
### MCVE
```python
from litestar import Litestar, post
from litestar.testing import TestClient
from pydantic import BaseModel
class Order(BaseModel):
code: str
@post()
async def test_handler(data: Order) -> Order:
return data
app = Litestar(route_handlers=[test_handler], debug=True)
def test() -> None:
with TestClient(app=app) as client:
response = client.get(
"/schema/swagger",
)
assert response.is_success
if __name__ == "__main__":
test()
```
### Steps to reproduce
```bash
1. Install Litestar with Pydantic v1
2. Start up a server
3. Perform a request to /schema/swagger
```
### Screenshots
```bash
""
```
### Logs
```bash
Traceback (most recent call last):
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py", line 191, in __call__
await self.app(scope, receive, send)
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/routes/http.py", line 81, in handle
response = await self._get_response_for_request(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/routes/http.py", line 133, in _get_response_for_request
return await self._call_handler_function(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/routes/http.py", line 153, in _call_handler_function
response_data, cleanup_group = await self._get_response_data(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/routes/http.py", line 199, in _get_response_data
data = route_handler.fn(**parsed_kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/openapi/controller.py", line 231, in swagger_ui
return ASGIResponse(body=self.render_swagger_ui(request), media_type=MediaType.HTML)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/openapi/controller.py", line 391, in render_swagger_ui
schema = self.get_schema_from_request(request)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/openapi/controller.py", line 96, in get_schema_from_request
return request.app.openapi_schema
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/app.py", line 586, in openapi_schema
self.update_openapi_schema()
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/app.py", line 836, in update_openapi_schema
path_item, created_operation_ids = create_path_item(
^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/_openapi/path_item.py", line 109, in create_path_item
request_body = create_request_body(
^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/_openapi/request_body.py", line 32, in create_request_body
schema = schema_creator.for_field_definition(field_definition)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py", line 301, in for_field_definition
result = self.for_plugin(field_definition, plugin_for_annotation)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py", line 480, in for_plugin
schema = plugin.to_openapi_schema(field_definition=field_definition, schema_creator=self)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_schema_plugin.py", line 230, in to_openapi_schema
return self.for_pydantic_model(field_definition=field_definition, schema_creator=schema_creator)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_schema_plugin.py", line 250, in for_pydantic_model
if is_pydantic_2_model(annotation):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/contrib/pydantic/utils.py", line 161, in is_pydantic_2_model
return issubclass(obj, pydantic_v2.BaseModel) # pyright: ignore
^^^^^^^^^^^^^^^^^^^^^
AttributeError: type object 'Empty' has no attribute 'BaseModel'
```
### Litestar Version
2.3.2
### Platform
- [X] Linux
- [ ] Mac
- [ ] Windows
- [ ] Other (Please specify in the description above)
<!-- POLAR PLEDGE BADGE START -->
---
> [!NOTE]
> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and
> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.
>
> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)
> * If you would like to see an issue prioritized, make a pledge towards it!
> * We receive the pledge once the issue is completed & verified
> * This, along with engagement in the community, helps us know which features are a priority to our users.
<a href="https://polar.sh/litestar-org/litestar/issues/2672">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="https://polar.sh/api/github/litestar-org/litestar/issues/2672/pledge.svg?darkmode=1">
<img alt="Fund with Polar" src="https://polar.sh/api/github/litestar-org/litestar/issues/2672/pledge.svg">
</picture>
</a>
<!-- POLAR PLEDGE BADGE END -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/contrib/pydantic/utils.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import TYPE_CHECKING, Any
4
5 from typing_extensions import get_type_hints
6
7 from litestar.types import Empty
8 from litestar.utils import is_class_and_subclass
9 from litestar.utils.predicates import is_generic
10 from litestar.utils.typing import (
11 _substitute_typevars,
12 get_origin_or_inner_type,
13 get_type_hints_with_generics_resolved,
14 instantiable_type_mapping,
15 )
16
17 # isort: off
18 try:
19 from pydantic import v1 as pydantic_v1
20 import pydantic as pydantic_v2
21 from pydantic.fields import PydanticUndefined as Pydantic2Undefined # type: ignore[attr-defined]
22 from pydantic.v1.fields import Undefined as Pydantic1Undefined
23
24 PYDANTIC_UNDEFINED_SENTINELS = {Pydantic1Undefined, Pydantic2Undefined}
25 except ImportError:
26 try:
27 import pydantic as pydantic_v1 # type: ignore[no-redef]
28 from pydantic.fields import Undefined as Pydantic1Undefined # type: ignore[attr-defined, no-redef]
29
30 pydantic_v2 = Empty # type: ignore[assignment]
31 PYDANTIC_UNDEFINED_SENTINELS = {Pydantic1Undefined}
32
33 except ImportError: # pyright: ignore
34 pydantic_v1 = Empty # type: ignore[assignment]
35 pydantic_v2 = Empty # type: ignore[assignment]
36 PYDANTIC_UNDEFINED_SENTINELS = set()
37 # isort: on
38
39
40 if TYPE_CHECKING:
41 from typing_extensions import TypeGuard
42
43
44 def is_pydantic_model_class(
45 annotation: Any,
46 ) -> TypeGuard[type[pydantic_v1.BaseModel | pydantic_v2.BaseModel]]: # pyright: ignore
47 """Given a type annotation determine if the annotation is a subclass of pydantic's BaseModel.
48
49 Args:
50 annotation: A type.
51
52 Returns:
53 A typeguard determining whether the type is :data:`BaseModel pydantic.BaseModel>`.
54 """
55 if pydantic_v1 is Empty: # type: ignore[comparison-overlap] # pragma: no cover
56 return False
57
58 if pydantic_v2 is Empty: # type: ignore[comparison-overlap] # pragma: no cover
59 return is_class_and_subclass(annotation, pydantic_v1.BaseModel)
60
61 return is_class_and_subclass(annotation, (pydantic_v1.BaseModel, pydantic_v2.BaseModel))
62
63
64 def is_pydantic_model_instance(
65 annotation: Any,
66 ) -> TypeGuard[pydantic_v1.BaseModel | pydantic_v2.BaseModel]: # pyright: ignore
67 """Given a type annotation determine if the annotation is an instance of pydantic's BaseModel.
68
69 Args:
70 annotation: A type.
71
72 Returns:
73 A typeguard determining whether the type is :data:`BaseModel pydantic.BaseModel>`.
74 """
75 if pydantic_v1 is Empty: # type: ignore[comparison-overlap] # pragma: no cover
76 return False
77
78 if pydantic_v2 is Empty: # type: ignore[comparison-overlap] # pragma: no cover
79 return isinstance(annotation, pydantic_v1.BaseModel)
80
81 return isinstance(annotation, (pydantic_v1.BaseModel, pydantic_v2.BaseModel))
82
83
84 def is_pydantic_constrained_field(annotation: Any) -> bool:
85 """Check if the given annotation is a constrained pydantic type.
86
87 Args:
88 annotation: A type annotation
89
90 Returns:
91 True if pydantic is installed and the type is a constrained type, otherwise False.
92 """
93 if pydantic_v1 is Empty: # type: ignore[comparison-overlap] # pragma: no cover
94 return False
95
96 return any(
97 is_class_and_subclass(annotation, constrained_type) # pyright: ignore
98 for constrained_type in (
99 pydantic_v1.ConstrainedBytes,
100 pydantic_v1.ConstrainedDate,
101 pydantic_v1.ConstrainedDecimal,
102 pydantic_v1.ConstrainedFloat,
103 pydantic_v1.ConstrainedFrozenSet,
104 pydantic_v1.ConstrainedInt,
105 pydantic_v1.ConstrainedList,
106 pydantic_v1.ConstrainedSet,
107 pydantic_v1.ConstrainedStr,
108 )
109 )
110
111
112 def pydantic_unwrap_and_get_origin(annotation: Any) -> Any | None:
113 if pydantic_v2 is Empty or is_class_and_subclass(annotation, pydantic_v1.BaseModel): # type: ignore[comparison-overlap]
114 return get_origin_or_inner_type(annotation)
115
116 origin = annotation.__pydantic_generic_metadata__["origin"]
117 return instantiable_type_mapping.get(origin, origin)
118
119
120 def pydantic_get_type_hints_with_generics_resolved(
121 annotation: Any,
122 globalns: dict[str, Any] | None = None,
123 localns: dict[str, Any] | None = None,
124 include_extras: bool = False,
125 ) -> dict[str, Any]:
126 if pydantic_v2 is Empty or is_class_and_subclass(annotation, pydantic_v1.BaseModel): # type: ignore[comparison-overlap]
127 return get_type_hints_with_generics_resolved(annotation)
128
129 origin = pydantic_unwrap_and_get_origin(annotation)
130 if origin is None:
131 type_hints = get_type_hints(annotation, globalns=globalns, localns=localns, include_extras=include_extras)
132 typevar_map = {p: p for p in annotation.__pydantic_generic_metadata__["parameters"]}
133 else:
134 type_hints = get_type_hints(origin, globalns=globalns, localns=localns, include_extras=include_extras)
135 args = annotation.__pydantic_generic_metadata__["args"]
136 parameters = origin.__pydantic_generic_metadata__["parameters"]
137 typevar_map = dict(zip(parameters, args))
138
139 return {n: _substitute_typevars(type_, typevar_map) for n, type_ in type_hints.items()}
140
141
142 def pydantic_get_unwrapped_annotation_and_type_hints(annotation: Any) -> tuple[Any, dict[str, Any]]:
143 """Get the unwrapped annotation and the type hints after resolving generics.
144
145 Args:
146 annotation: A type annotation.
147
148 Returns:
149 A tuple containing the unwrapped annotation and the type hints.
150 """
151
152 if is_generic(annotation):
153 origin = pydantic_unwrap_and_get_origin(annotation)
154 return origin or annotation, pydantic_get_type_hints_with_generics_resolved(annotation, include_extras=True)
155 return annotation, get_type_hints(annotation, include_extras=True)
156
157
158 def is_pydantic_2_model(
159 obj: type[pydantic_v1.BaseModel | pydantic_v2.BaseModel], # pyright: ignore
160 ) -> TypeGuard[pydantic_v2.BaseModel]: # pyright: ignore
161 return issubclass(obj, pydantic_v2.BaseModel) # pyright: ignore
162
163
164 def is_pydantic_undefined(value: Any) -> bool:
165 return any(v is value for v in PYDANTIC_UNDEFINED_SENTINELS)
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/contrib/pydantic/utils.py b/litestar/contrib/pydantic/utils.py
--- a/litestar/contrib/pydantic/utils.py
+++ b/litestar/contrib/pydantic/utils.py
@@ -158,7 +158,7 @@
def is_pydantic_2_model(
obj: type[pydantic_v1.BaseModel | pydantic_v2.BaseModel], # pyright: ignore
) -> TypeGuard[pydantic_v2.BaseModel]: # pyright: ignore
- return issubclass(obj, pydantic_v2.BaseModel) # pyright: ignore
+ return pydantic_v2 is not Empty and issubclass(obj, pydantic_v2.BaseModel) # type: ignore[comparison-overlap]
def is_pydantic_undefined(value: Any) -> bool:
| {"golden_diff": "diff --git a/litestar/contrib/pydantic/utils.py b/litestar/contrib/pydantic/utils.py\n--- a/litestar/contrib/pydantic/utils.py\n+++ b/litestar/contrib/pydantic/utils.py\n@@ -158,7 +158,7 @@\n def is_pydantic_2_model(\n obj: type[pydantic_v1.BaseModel | pydantic_v2.BaseModel], # pyright: ignore\n ) -> TypeGuard[pydantic_v2.BaseModel]: # pyright: ignore\n- return issubclass(obj, pydantic_v2.BaseModel) # pyright: ignore\n+ return pydantic_v2 is not Empty and issubclass(obj, pydantic_v2.BaseModel) # type: ignore[comparison-overlap]\n \n \n def is_pydantic_undefined(value: Any) -> bool:\n", "issue": "Bug: OpenAPI schema generation fails with pydantic < 2.0.0\n### Description\n\nWhen pydantic 1.* is installed, interactive API docs cannot be loaded.\r\n\r\nIt seems the problem is that a helper function is trying to access a `pydantic_v2` attribute without checking if it was imported correctly, the stack trace below provides more info.\n\n### URL to code causing the issue\n\n_No response_\n\n### MCVE\n\n```python\nfrom litestar import Litestar, post\r\nfrom litestar.testing import TestClient\r\nfrom pydantic import BaseModel\r\n\r\n\r\nclass Order(BaseModel):\r\n code: str\r\n\r\n\r\n@post()\r\nasync def test_handler(data: Order) -> Order:\r\n return data\r\n\r\n\r\napp = Litestar(route_handlers=[test_handler], debug=True)\r\n\r\n\r\ndef test() -> None:\r\n with TestClient(app=app) as client:\r\n response = client.get(\r\n \"/schema/swagger\",\r\n )\r\n assert response.is_success\r\n\r\n\r\nif __name__ == \"__main__\":\r\n test()\n```\n\n\n### Steps to reproduce\n\n```bash\n1. Install Litestar with Pydantic v1\r\n2. Start up a server\r\n3. Perform a request to /schema/swagger\n```\n\n\n### Screenshots\n\n```bash\n\"\"\n```\n\n\n### Logs\n\n```bash\nTraceback (most recent call last):\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/middleware/exceptions/middleware.py\", line 191, in __call__\r\n await self.app(scope, receive, send)\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/routes/http.py\", line 81, in handle\r\n response = await self._get_response_for_request(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/routes/http.py\", line 133, in _get_response_for_request\r\n return await self._call_handler_function(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/routes/http.py\", line 153, in _call_handler_function\r\n response_data, cleanup_group = await self._get_response_data(\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/routes/http.py\", line 199, in _get_response_data\r\n data = route_handler.fn(**parsed_kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/openapi/controller.py\", line 231, in swagger_ui\r\n return ASGIResponse(body=self.render_swagger_ui(request), media_type=MediaType.HTML)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/openapi/controller.py\", line 391, in render_swagger_ui\r\n schema = self.get_schema_from_request(request)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File 
\"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/openapi/controller.py\", line 96, in get_schema_from_request\r\n return request.app.openapi_schema\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/app.py\", line 586, in openapi_schema\r\n self.update_openapi_schema()\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/app.py\", line 836, in update_openapi_schema\r\n path_item, created_operation_ids = create_path_item(\r\n ^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/_openapi/path_item.py\", line 109, in create_path_item\r\n request_body = create_request_body(\r\n ^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/_openapi/request_body.py\", line 32, in create_request_body\r\n schema = schema_creator.for_field_definition(field_definition)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py\", line 301, in for_field_definition\r\n result = self.for_plugin(field_definition, plugin_for_annotation)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/_openapi/schema_generation/schema.py\", line 480, in for_plugin\r\n schema = plugin.to_openapi_schema(field_definition=field_definition, schema_creator=self)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_schema_plugin.py\", line 230, in to_openapi_schema\r\n return self.for_pydantic_model(field_definition=field_definition, schema_creator=schema_creator)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/contrib/pydantic/pydantic_schema_plugin.py\", line 250, in for_pydantic_model\r\n if is_pydantic_2_model(annotation):\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"/home/thunder/work/litestar_mvce/litestar_venv/lib/python3.11/site-packages/litestar/contrib/pydantic/utils.py\", line 161, in is_pydantic_2_model\r\n return issubclass(obj, pydantic_v2.BaseModel) # pyright: ignore\r\n ^^^^^^^^^^^^^^^^^^^^^\r\nAttributeError: type object 'Empty' has no attribute 'BaseModel'\n```\n\n\n### Litestar Version\n\n2.3.2\n\n### Platform\n\n- [X] Linux\n- [ ] Mac\n- [ ] Windows\n- [ ] Other (Please specify in the description above)\n\n<!-- POLAR PLEDGE BADGE START -->\n---\n> [!NOTE] \n> While we are open for sponsoring on [GitHub Sponsors](https://github.com/sponsors/litestar-org/) and \n> [OpenCollective](https://opencollective.com/litestar), we also utilize [Polar.sh](https://polar.sh/) to engage in pledge-based sponsorship.\n>\n> Check out all issues funded or available for funding [on our Polar.sh Litestar dashboard](https://polar.sh/litestar-org)\n> * If you would like to see an issue prioritized, make a pledge towards it!\n> * We receive the pledge once the issue is completed & verified\n> * This, along with engagement in the community, helps us know which features are a priority to our users.\n\n<a 
href=\"https://polar.sh/litestar-org/litestar/issues/2672\">\n<picture>\n <source media=\"(prefers-color-scheme: dark)\" srcset=\"https://polar.sh/api/github/litestar-org/litestar/issues/2672/pledge.svg?darkmode=1\">\n <img alt=\"Fund with Polar\" src=\"https://polar.sh/api/github/litestar-org/litestar/issues/2672/pledge.svg\">\n</picture>\n</a>\n<!-- POLAR PLEDGE BADGE END -->\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nfrom typing_extensions import get_type_hints\n\nfrom litestar.types import Empty\nfrom litestar.utils import is_class_and_subclass\nfrom litestar.utils.predicates import is_generic\nfrom litestar.utils.typing import (\n _substitute_typevars,\n get_origin_or_inner_type,\n get_type_hints_with_generics_resolved,\n instantiable_type_mapping,\n)\n\n# isort: off\ntry:\n from pydantic import v1 as pydantic_v1\n import pydantic as pydantic_v2\n from pydantic.fields import PydanticUndefined as Pydantic2Undefined # type: ignore[attr-defined]\n from pydantic.v1.fields import Undefined as Pydantic1Undefined\n\n PYDANTIC_UNDEFINED_SENTINELS = {Pydantic1Undefined, Pydantic2Undefined}\nexcept ImportError:\n try:\n import pydantic as pydantic_v1 # type: ignore[no-redef]\n from pydantic.fields import Undefined as Pydantic1Undefined # type: ignore[attr-defined, no-redef]\n\n pydantic_v2 = Empty # type: ignore[assignment]\n PYDANTIC_UNDEFINED_SENTINELS = {Pydantic1Undefined}\n\n except ImportError: # pyright: ignore\n pydantic_v1 = Empty # type: ignore[assignment]\n pydantic_v2 = Empty # type: ignore[assignment]\n PYDANTIC_UNDEFINED_SENTINELS = set()\n# isort: on\n\n\nif TYPE_CHECKING:\n from typing_extensions import TypeGuard\n\n\ndef is_pydantic_model_class(\n annotation: Any,\n) -> TypeGuard[type[pydantic_v1.BaseModel | pydantic_v2.BaseModel]]: # pyright: ignore\n \"\"\"Given a type annotation determine if the annotation is a subclass of pydantic's BaseModel.\n\n Args:\n annotation: A type.\n\n Returns:\n A typeguard determining whether the type is :data:`BaseModel pydantic.BaseModel>`.\n \"\"\"\n if pydantic_v1 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return False\n\n if pydantic_v2 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return is_class_and_subclass(annotation, pydantic_v1.BaseModel)\n\n return is_class_and_subclass(annotation, (pydantic_v1.BaseModel, pydantic_v2.BaseModel))\n\n\ndef is_pydantic_model_instance(\n annotation: Any,\n) -> TypeGuard[pydantic_v1.BaseModel | pydantic_v2.BaseModel]: # pyright: ignore\n \"\"\"Given a type annotation determine if the annotation is an instance of pydantic's BaseModel.\n\n Args:\n annotation: A type.\n\n Returns:\n A typeguard determining whether the type is :data:`BaseModel pydantic.BaseModel>`.\n \"\"\"\n if pydantic_v1 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return False\n\n if pydantic_v2 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return isinstance(annotation, pydantic_v1.BaseModel)\n\n return isinstance(annotation, (pydantic_v1.BaseModel, pydantic_v2.BaseModel))\n\n\ndef is_pydantic_constrained_field(annotation: Any) -> bool:\n \"\"\"Check if the given annotation is a constrained pydantic type.\n\n Args:\n annotation: A type annotation\n\n Returns:\n True if pydantic is installed and the type is a constrained type, otherwise False.\n \"\"\"\n if pydantic_v1 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return False\n\n return any(\n 
is_class_and_subclass(annotation, constrained_type) # pyright: ignore\n for constrained_type in (\n pydantic_v1.ConstrainedBytes,\n pydantic_v1.ConstrainedDate,\n pydantic_v1.ConstrainedDecimal,\n pydantic_v1.ConstrainedFloat,\n pydantic_v1.ConstrainedFrozenSet,\n pydantic_v1.ConstrainedInt,\n pydantic_v1.ConstrainedList,\n pydantic_v1.ConstrainedSet,\n pydantic_v1.ConstrainedStr,\n )\n )\n\n\ndef pydantic_unwrap_and_get_origin(annotation: Any) -> Any | None:\n if pydantic_v2 is Empty or is_class_and_subclass(annotation, pydantic_v1.BaseModel): # type: ignore[comparison-overlap]\n return get_origin_or_inner_type(annotation)\n\n origin = annotation.__pydantic_generic_metadata__[\"origin\"]\n return instantiable_type_mapping.get(origin, origin)\n\n\ndef pydantic_get_type_hints_with_generics_resolved(\n annotation: Any,\n globalns: dict[str, Any] | None = None,\n localns: dict[str, Any] | None = None,\n include_extras: bool = False,\n) -> dict[str, Any]:\n if pydantic_v2 is Empty or is_class_and_subclass(annotation, pydantic_v1.BaseModel): # type: ignore[comparison-overlap]\n return get_type_hints_with_generics_resolved(annotation)\n\n origin = pydantic_unwrap_and_get_origin(annotation)\n if origin is None:\n type_hints = get_type_hints(annotation, globalns=globalns, localns=localns, include_extras=include_extras)\n typevar_map = {p: p for p in annotation.__pydantic_generic_metadata__[\"parameters\"]}\n else:\n type_hints = get_type_hints(origin, globalns=globalns, localns=localns, include_extras=include_extras)\n args = annotation.__pydantic_generic_metadata__[\"args\"]\n parameters = origin.__pydantic_generic_metadata__[\"parameters\"]\n typevar_map = dict(zip(parameters, args))\n\n return {n: _substitute_typevars(type_, typevar_map) for n, type_ in type_hints.items()}\n\n\ndef pydantic_get_unwrapped_annotation_and_type_hints(annotation: Any) -> tuple[Any, dict[str, Any]]:\n \"\"\"Get the unwrapped annotation and the type hints after resolving generics.\n\n Args:\n annotation: A type annotation.\n\n Returns:\n A tuple containing the unwrapped annotation and the type hints.\n \"\"\"\n\n if is_generic(annotation):\n origin = pydantic_unwrap_and_get_origin(annotation)\n return origin or annotation, pydantic_get_type_hints_with_generics_resolved(annotation, include_extras=True)\n return annotation, get_type_hints(annotation, include_extras=True)\n\n\ndef is_pydantic_2_model(\n obj: type[pydantic_v1.BaseModel | pydantic_v2.BaseModel], # pyright: ignore\n) -> TypeGuard[pydantic_v2.BaseModel]: # pyright: ignore\n return issubclass(obj, pydantic_v2.BaseModel) # pyright: ignore\n\n\ndef is_pydantic_undefined(value: Any) -> bool:\n return any(v is value for v in PYDANTIC_UNDEFINED_SENTINELS)\n", "path": "litestar/contrib/pydantic/utils.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nfrom typing_extensions import get_type_hints\n\nfrom litestar.types import Empty\nfrom litestar.utils import is_class_and_subclass\nfrom litestar.utils.predicates import is_generic\nfrom litestar.utils.typing import (\n _substitute_typevars,\n get_origin_or_inner_type,\n get_type_hints_with_generics_resolved,\n instantiable_type_mapping,\n)\n\n# isort: off\ntry:\n from pydantic import v1 as pydantic_v1\n import pydantic as pydantic_v2\n from pydantic.fields import PydanticUndefined as Pydantic2Undefined # type: ignore[attr-defined]\n from pydantic.v1.fields import Undefined as Pydantic1Undefined\n\n PYDANTIC_UNDEFINED_SENTINELS = {Pydantic1Undefined, 
Pydantic2Undefined}\nexcept ImportError:\n try:\n import pydantic as pydantic_v1 # type: ignore[no-redef]\n from pydantic.fields import Undefined as Pydantic1Undefined # type: ignore[attr-defined, no-redef]\n\n pydantic_v2 = Empty # type: ignore[assignment]\n PYDANTIC_UNDEFINED_SENTINELS = {Pydantic1Undefined}\n\n except ImportError: # pyright: ignore\n pydantic_v1 = Empty # type: ignore[assignment]\n pydantic_v2 = Empty # type: ignore[assignment]\n PYDANTIC_UNDEFINED_SENTINELS = set()\n# isort: on\n\n\nif TYPE_CHECKING:\n from typing_extensions import TypeGuard\n\n\ndef is_pydantic_model_class(\n annotation: Any,\n) -> TypeGuard[type[pydantic_v1.BaseModel | pydantic_v2.BaseModel]]: # pyright: ignore\n \"\"\"Given a type annotation determine if the annotation is a subclass of pydantic's BaseModel.\n\n Args:\n annotation: A type.\n\n Returns:\n A typeguard determining whether the type is :data:`BaseModel pydantic.BaseModel>`.\n \"\"\"\n if pydantic_v1 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return False\n\n if pydantic_v2 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return is_class_and_subclass(annotation, pydantic_v1.BaseModel)\n\n return is_class_and_subclass(annotation, (pydantic_v1.BaseModel, pydantic_v2.BaseModel))\n\n\ndef is_pydantic_model_instance(\n annotation: Any,\n) -> TypeGuard[pydantic_v1.BaseModel | pydantic_v2.BaseModel]: # pyright: ignore\n \"\"\"Given a type annotation determine if the annotation is an instance of pydantic's BaseModel.\n\n Args:\n annotation: A type.\n\n Returns:\n A typeguard determining whether the type is :data:`BaseModel pydantic.BaseModel>`.\n \"\"\"\n if pydantic_v1 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return False\n\n if pydantic_v2 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return isinstance(annotation, pydantic_v1.BaseModel)\n\n return isinstance(annotation, (pydantic_v1.BaseModel, pydantic_v2.BaseModel))\n\n\ndef is_pydantic_constrained_field(annotation: Any) -> bool:\n \"\"\"Check if the given annotation is a constrained pydantic type.\n\n Args:\n annotation: A type annotation\n\n Returns:\n True if pydantic is installed and the type is a constrained type, otherwise False.\n \"\"\"\n if pydantic_v1 is Empty: # type: ignore[comparison-overlap] # pragma: no cover\n return False\n\n return any(\n is_class_and_subclass(annotation, constrained_type) # pyright: ignore\n for constrained_type in (\n pydantic_v1.ConstrainedBytes,\n pydantic_v1.ConstrainedDate,\n pydantic_v1.ConstrainedDecimal,\n pydantic_v1.ConstrainedFloat,\n pydantic_v1.ConstrainedFrozenSet,\n pydantic_v1.ConstrainedInt,\n pydantic_v1.ConstrainedList,\n pydantic_v1.ConstrainedSet,\n pydantic_v1.ConstrainedStr,\n )\n )\n\n\ndef pydantic_unwrap_and_get_origin(annotation: Any) -> Any | None:\n if pydantic_v2 is Empty or is_class_and_subclass(annotation, pydantic_v1.BaseModel): # type: ignore[comparison-overlap]\n return get_origin_or_inner_type(annotation)\n\n origin = annotation.__pydantic_generic_metadata__[\"origin\"]\n return instantiable_type_mapping.get(origin, origin)\n\n\ndef pydantic_get_type_hints_with_generics_resolved(\n annotation: Any,\n globalns: dict[str, Any] | None = None,\n localns: dict[str, Any] | None = None,\n include_extras: bool = False,\n) -> dict[str, Any]:\n if pydantic_v2 is Empty or is_class_and_subclass(annotation, pydantic_v1.BaseModel): # type: ignore[comparison-overlap]\n return get_type_hints_with_generics_resolved(annotation)\n\n origin = 
pydantic_unwrap_and_get_origin(annotation)\n if origin is None:\n type_hints = get_type_hints(annotation, globalns=globalns, localns=localns, include_extras=include_extras)\n typevar_map = {p: p for p in annotation.__pydantic_generic_metadata__[\"parameters\"]}\n else:\n type_hints = get_type_hints(origin, globalns=globalns, localns=localns, include_extras=include_extras)\n args = annotation.__pydantic_generic_metadata__[\"args\"]\n parameters = origin.__pydantic_generic_metadata__[\"parameters\"]\n typevar_map = dict(zip(parameters, args))\n\n return {n: _substitute_typevars(type_, typevar_map) for n, type_ in type_hints.items()}\n\n\ndef pydantic_get_unwrapped_annotation_and_type_hints(annotation: Any) -> tuple[Any, dict[str, Any]]:\n \"\"\"Get the unwrapped annotation and the type hints after resolving generics.\n\n Args:\n annotation: A type annotation.\n\n Returns:\n A tuple containing the unwrapped annotation and the type hints.\n \"\"\"\n\n if is_generic(annotation):\n origin = pydantic_unwrap_and_get_origin(annotation)\n return origin or annotation, pydantic_get_type_hints_with_generics_resolved(annotation, include_extras=True)\n return annotation, get_type_hints(annotation, include_extras=True)\n\n\ndef is_pydantic_2_model(\n obj: type[pydantic_v1.BaseModel | pydantic_v2.BaseModel], # pyright: ignore\n) -> TypeGuard[pydantic_v2.BaseModel]: # pyright: ignore\n return pydantic_v2 is not Empty and issubclass(obj, pydantic_v2.BaseModel) # type: ignore[comparison-overlap]\n\n\ndef is_pydantic_undefined(value: Any) -> bool:\n return any(v is value for v in PYDANTIC_UNDEFINED_SENTINELS)\n", "path": "litestar/contrib/pydantic/utils.py"}]} | 4,086 | 194 |
gh_patches_debug_17268 | rasdani/github-patches | git_diff | OpenNMT__OpenNMT-py-1436 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Endless loop when loading dataset
Hi,
I'm a beginner with OpenNMT-py and I also encounter an endless loop when loading the training dataset. I have checked Issues 1079 and 1354 but it still does not work. My training set is small and I am not using multiple GPUs, just one. Please give me some comments, thank you!
My OpenNMT is the latest version and my OS is Ubuntu 18.04; here are the parameters I used:
python preprocess.py -train_src data/src-train.txt -train_tgt data/tgt-train.txt -valid_src data/src-val.txt -valid_tgt data/tgt-val.txt -save_data data/demo -src_seq_length 1000 -tgt_seq_length 1000 -dynamic_dict -share_vocab
CUDA_VISIBLE_DEVICES=0 python train.py -data data/demo -save_model demo-model -world_size 1 -gpu_ranks 0 -copy_attn
[2019-05-16 16:35:16,797 INFO] * src vocab size = 988
[2019-05-16 16:35:16,797 INFO] * tgt vocab size = 988
[2019-05-16 16:35:16,797 INFO] Building model...
[2019-05-16 16:35:19,271 INFO] NMTModel(
(encoder): RNNEncoder(
(embeddings): Embeddings(
(make_embedding): Sequential(
(emb_luts): Elementwise(
(0): Embedding(988, 500, padding_idx=1)
)
)
)
(rnn): LSTM(500, 500, num_layers=2, dropout=0.3)
)
(decoder): InputFeedRNNDecoder(
(embeddings): Embeddings(
(make_embedding): Sequential(
(emb_luts): Elementwise(
(0): Embedding(988, 500, padding_idx=1)
)
)
)
(dropout): Dropout(p=0.3)
(rnn): StackedLSTM(
(dropout): Dropout(p=0.3)
(layers): ModuleList(
(0): LSTMCell(1000, 500)
(1): LSTMCell(500, 500)
)
)
(attn): GlobalAttention(
(linear_in): Linear(in_features=500, out_features=500, bias=False)
(linear_out): Linear(in_features=1000, out_features=500, bias=False)
)
(copy_attn): GlobalAttention(
(linear_in): Linear(in_features=500, out_features=500, bias=False)
(linear_out): Linear(in_features=1000, out_features=500, bias=False)
)
)
(generator): CopyGenerator(
(linear): Linear(in_features=500, out_features=988, bias=True)
(linear_copy): Linear(in_features=500, out_features=1, bias=True)
)
)
[2019-05-16 16:35:19,272 INFO] encoder: 4502000
[2019-05-16 16:35:19,272 INFO] decoder: 7497489
[2019-05-16 16:35:19,272 INFO] * number of parameters: 11999489
[2019-05-16 16:35:19,308 INFO] Starting training on GPU: [0]
[2019-05-16 16:35:19,308 INFO] Start training loop and validate every 10000 steps...
[2019-05-16 16:35:19,330 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,382 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,437 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,498 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,562 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,632 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,705 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,782 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,864 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:19,950 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,041 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,136 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,235 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,339 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,448 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,560 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
[2019-05-16 16:35:20,678 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762
I have tried using CPU only and default parameters, but this situation also happens. What can I do to solve this problem?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onmt/train_single.py`
Content:
```
1 #!/usr/bin/env python
2 """Training on a single process."""
3 import os
4
5 import torch
6
7 from onmt.inputters.inputter import build_dataset_iter, \
8 load_old_vocab, old_style_vocab, MultipleDatasetIterator
9 from onmt.model_builder import build_model
10 from onmt.utils.optimizers import Optimizer
11 from onmt.utils.misc import set_random_seed
12 from onmt.trainer import build_trainer
13 from onmt.models import build_model_saver
14 from onmt.utils.logging import init_logger, logger
15 from onmt.utils.parse import ArgumentParser
16
17
18 def _check_save_model_path(opt):
19 save_model_path = os.path.abspath(opt.save_model)
20 model_dirname = os.path.dirname(save_model_path)
21 if not os.path.exists(model_dirname):
22 os.makedirs(model_dirname)
23
24
25 def _tally_parameters(model):
26 enc = 0
27 dec = 0
28 for name, param in model.named_parameters():
29 if 'encoder' in name:
30 enc += param.nelement()
31 else:
32 dec += param.nelement()
33 return enc + dec, enc, dec
34
35
36 def configure_process(opt, device_id):
37 if device_id >= 0:
38 torch.cuda.set_device(device_id)
39 set_random_seed(opt.seed, device_id >= 0)
40
41
42 def main(opt, device_id):
43 # NOTE: It's important that ``opt`` has been validated and updated
44 # at this point.
45 configure_process(opt, device_id)
46 init_logger(opt.log_file)
47 assert len(opt.accum_count) == len(opt.accum_steps), \
48 'Number of accum_count values must match number of accum_steps'
49 # Load checkpoint if we resume from a previous training.
50 if opt.train_from:
51 logger.info('Loading checkpoint from %s' % opt.train_from)
52 checkpoint = torch.load(opt.train_from,
53 map_location=lambda storage, loc: storage)
54
55 model_opt = ArgumentParser.ckpt_model_opts(checkpoint["opt"])
56 ArgumentParser.update_model_opts(model_opt)
57 ArgumentParser.validate_model_opts(model_opt)
58 logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
59 vocab = checkpoint['vocab']
60 else:
61 checkpoint = None
62 model_opt = opt
63 vocab = torch.load(opt.data + '.vocab.pt')
64
65 # check for code where vocab is saved instead of fields
66 # (in the future this will be done in a smarter way)
67 if old_style_vocab(vocab):
68 fields = load_old_vocab(
69 vocab, opt.model_type, dynamic_dict=opt.copy_attn)
70 else:
71 fields = vocab
72
73 # Report src and tgt vocab sizes, including for features
74 for side in ['src', 'tgt']:
75 f = fields[side]
76 try:
77 f_iter = iter(f)
78 except TypeError:
79 f_iter = [(side, f)]
80 for sn, sf in f_iter:
81 if sf.use_vocab:
82 logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))
83
84 # Build model.
85 model = build_model(model_opt, opt, fields, checkpoint)
86 n_params, enc, dec = _tally_parameters(model)
87 logger.info('encoder: %d' % enc)
88 logger.info('decoder: %d' % dec)
89 logger.info('* number of parameters: %d' % n_params)
90 _check_save_model_path(opt)
91
92 # Build optimizer.
93 optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)
94
95 # Build model saver
96 model_saver = build_model_saver(model_opt, opt, model, fields, optim)
97
98 trainer = build_trainer(
99 opt, device_id, model, fields, optim, model_saver=model_saver)
100
101 train_iterables = []
102 for train_id in opt.data_ids:
103 if train_id:
104 shard_base = "train_" + train_id
105 else:
106 shard_base = "train"
107 iterable = build_dataset_iter(shard_base, fields, opt, multi=True)
108 train_iterables.append(iterable)
109
110 train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)
111
112 valid_iter = build_dataset_iter(
113 "valid", fields, opt, is_train=False)
114
115 if len(opt.gpu_ranks):
116 logger.info('Starting training on GPU: %s' % opt.gpu_ranks)
117 else:
118 logger.info('Starting training on CPU, could be very slow')
119 train_steps = opt.train_steps
120 if opt.single_pass and train_steps > 0:
121 logger.warning("Option single_pass is enabled, ignoring train_steps.")
122 train_steps = 0
123 trainer.train(
124 train_iter,
125 train_steps,
126 save_checkpoint_steps=opt.save_checkpoint_steps,
127 valid_iter=valid_iter,
128 valid_steps=opt.valid_steps)
129
130 if opt.tensorboard:
131 trainer.report_manager.tensorboard_writer.close()
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onmt/train_single.py b/onmt/train_single.py
--- a/onmt/train_single.py
+++ b/onmt/train_single.py
@@ -99,15 +99,14 @@
opt, device_id, model, fields, optim, model_saver=model_saver)
train_iterables = []
- for train_id in opt.data_ids:
- if train_id:
+ if len(opt.data_ids) > 1:
+ for train_id in opt.data_ids:
shard_base = "train_" + train_id
- else:
- shard_base = "train"
- iterable = build_dataset_iter(shard_base, fields, opt, multi=True)
- train_iterables.append(iterable)
-
- train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)
+ iterable = build_dataset_iter(shard_base, fields, opt, multi=True)
+ train_iterables.append(iterable)
+ train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)
+ else:
+ train_iter = build_dataset_iter("train", fields, opt)
valid_iter = build_dataset_iter(
"valid", fields, opt, is_train=False)
| {"golden_diff": "diff --git a/onmt/train_single.py b/onmt/train_single.py\n--- a/onmt/train_single.py\n+++ b/onmt/train_single.py\n@@ -99,15 +99,14 @@\n opt, device_id, model, fields, optim, model_saver=model_saver)\n \n train_iterables = []\n- for train_id in opt.data_ids:\n- if train_id:\n+ if len(opt.data_ids) > 1:\n+ for train_id in opt.data_ids:\n shard_base = \"train_\" + train_id\n- else:\n- shard_base = \"train\"\n- iterable = build_dataset_iter(shard_base, fields, opt, multi=True)\n- train_iterables.append(iterable)\n-\n- train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)\n+ iterable = build_dataset_iter(shard_base, fields, opt, multi=True)\n+ train_iterables.append(iterable)\n+ train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)\n+ else:\n+ train_iter = build_dataset_iter(\"train\", fields, opt)\n \n valid_iter = build_dataset_iter(\n \"valid\", fields, opt, is_train=False)\n", "issue": "Endless loop when loading dataset\nHi,\r\nI'm a beginner of OpenNMT-py and I also encounter loop on loading training dataset. I have checked Issues 1079 and 1354 but it still not work. My training set is small and I have not use multiple GPU, just one. Please give me some comments, thank you!\r\nMy OpenNMT is the latest version and my os is Ubuntu18.04, here is the parameters I used:\r\n\r\npython preprocess.py -train_src data/src-train.txt -train_tgt data/tgt-train.txt -valid_src data/src-val.txt -valid_tgt data/tgt-val.txt -save_data data/demo -src_seq_length 1000 -tgt_seq_length 1000 -dynamic_dict -share_vocab\r\n\r\nCUDA_VISIBLE_DEVICES=0 python train.py -data data/demo -save_model demo-model -world_size 1 -gpu_ranks 0 -copy_attn\r\n[2019-05-16 16:35:16,797 INFO] * src vocab size = 988\r\n[2019-05-16 16:35:16,797 INFO] * tgt vocab size = 988\r\n[2019-05-16 16:35:16,797 INFO] Building model...\r\n[2019-05-16 16:35:19,271 INFO] NMTModel(\r\n (encoder): RNNEncoder(\r\n (embeddings): Embeddings(\r\n (make_embedding): Sequential(\r\n (emb_luts): Elementwise(\r\n (0): Embedding(988, 500, padding_idx=1)\r\n )\r\n )\r\n )\r\n (rnn): LSTM(500, 500, num_layers=2, dropout=0.3)\r\n )\r\n (decoder): InputFeedRNNDecoder(\r\n (embeddings): Embeddings(\r\n (make_embedding): Sequential(\r\n (emb_luts): Elementwise(\r\n (0): Embedding(988, 500, padding_idx=1)\r\n )\r\n )\r\n )\r\n (dropout): Dropout(p=0.3)\r\n (rnn): StackedLSTM(\r\n (dropout): Dropout(p=0.3)\r\n (layers): ModuleList(\r\n (0): LSTMCell(1000, 500)\r\n (1): LSTMCell(500, 500)\r\n )\r\n )\r\n (attn): GlobalAttention(\r\n (linear_in): Linear(in_features=500, out_features=500, bias=False)\r\n (linear_out): Linear(in_features=1000, out_features=500, bias=False)\r\n )\r\n (copy_attn): GlobalAttention(\r\n (linear_in): Linear(in_features=500, out_features=500, bias=False)\r\n (linear_out): Linear(in_features=1000, out_features=500, bias=False)\r\n )\r\n )\r\n (generator): CopyGenerator(\r\n (linear): Linear(in_features=500, out_features=988, bias=True)\r\n (linear_copy): Linear(in_features=500, out_features=1, bias=True)\r\n )\r\n)\r\n[2019-05-16 16:35:19,272 INFO] encoder: 4502000\r\n[2019-05-16 16:35:19,272 INFO] decoder: 7497489\r\n[2019-05-16 16:35:19,272 INFO] * number of parameters: 11999489\r\n[2019-05-16 16:35:19,308 INFO] Starting training on GPU: [0]\r\n[2019-05-16 16:35:19,308 INFO] Start training loop and validate every 10000 steps...\r\n[2019-05-16 16:35:19,330 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,382 INFO] Loading dataset from 
data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,437 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,498 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,562 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,632 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,705 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,782 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,864 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:19,950 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,041 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,136 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,235 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,339 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,448 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,560 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n[2019-05-16 16:35:20,678 INFO] Loading dataset from data/demo.train.0.pt, number of examples: 762\r\n\r\nI have tried using CPU only and default parameters, but this situation also happens. How can I do to solve this problem?\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Training on a single process.\"\"\"\nimport os\n\nimport torch\n\nfrom onmt.inputters.inputter import build_dataset_iter, \\\n load_old_vocab, old_style_vocab, MultipleDatasetIterator\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import Optimizer\nfrom onmt.utils.misc import set_random_seed\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef _check_save_model_path(opt):\n save_model_path = os.path.abspath(opt.save_model)\n model_dirname = os.path.dirname(save_model_path)\n if not os.path.exists(model_dirname):\n os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n else:\n dec += param.nelement()\n return enc + dec, enc, dec\n\n\ndef configure_process(opt, device_id):\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n set_random_seed(opt.seed, device_id >= 0)\n\n\ndef main(opt, device_id):\n # NOTE: It's important that ``opt`` has been validated and updated\n # at this point.\n configure_process(opt, device_id)\n init_logger(opt.log_file)\n assert len(opt.accum_count) == len(opt.accum_steps), \\\n 'Number of accum_count values must match number of accum_steps'\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n\n model_opt = ArgumentParser.ckpt_model_opts(checkpoint[\"opt\"])\n ArgumentParser.update_model_opts(model_opt)\n ArgumentParser.validate_model_opts(model_opt)\n logger.info('Loading 
vocab from checkpoint at %s.' % opt.train_from)\n vocab = checkpoint['vocab']\n else:\n checkpoint = None\n model_opt = opt\n vocab = torch.load(opt.data + '.vocab.pt')\n\n # check for code where vocab is saved instead of fields\n # (in the future this will be done in a smarter way)\n if old_style_vocab(vocab):\n fields = load_old_vocab(\n vocab, opt.model_type, dynamic_dict=opt.copy_attn)\n else:\n fields = vocab\n\n # Report src and tgt vocab sizes, including for features\n for side in ['src', 'tgt']:\n f = fields[side]\n try:\n f_iter = iter(f)\n except TypeError:\n f_iter = [(side, f)]\n for sn, sf in f_iter:\n if sf.use_vocab:\n logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(\n opt, device_id, model, fields, optim, model_saver=model_saver)\n\n train_iterables = []\n for train_id in opt.data_ids:\n if train_id:\n shard_base = \"train_\" + train_id\n else:\n shard_base = \"train\"\n iterable = build_dataset_iter(shard_base, fields, opt, multi=True)\n train_iterables.append(iterable)\n\n train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)\n\n valid_iter = build_dataset_iter(\n \"valid\", fields, opt, is_train=False)\n\n if len(opt.gpu_ranks):\n logger.info('Starting training on GPU: %s' % opt.gpu_ranks)\n else:\n logger.info('Starting training on CPU, could be very slow')\n train_steps = opt.train_steps\n if opt.single_pass and train_steps > 0:\n logger.warning(\"Option single_pass is enabled, ignoring train_steps.\")\n train_steps = 0\n trainer.train(\n train_iter,\n train_steps,\n save_checkpoint_steps=opt.save_checkpoint_steps,\n valid_iter=valid_iter,\n valid_steps=opt.valid_steps)\n\n if opt.tensorboard:\n trainer.report_manager.tensorboard_writer.close()\n", "path": "onmt/train_single.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"Training on a single process.\"\"\"\nimport os\n\nimport torch\n\nfrom onmt.inputters.inputter import build_dataset_iter, \\\n load_old_vocab, old_style_vocab, MultipleDatasetIterator\nfrom onmt.model_builder import build_model\nfrom onmt.utils.optimizers import Optimizer\nfrom onmt.utils.misc import set_random_seed\nfrom onmt.trainer import build_trainer\nfrom onmt.models import build_model_saver\nfrom onmt.utils.logging import init_logger, logger\nfrom onmt.utils.parse import ArgumentParser\n\n\ndef _check_save_model_path(opt):\n save_model_path = os.path.abspath(opt.save_model)\n model_dirname = os.path.dirname(save_model_path)\n if not os.path.exists(model_dirname):\n os.makedirs(model_dirname)\n\n\ndef _tally_parameters(model):\n enc = 0\n dec = 0\n for name, param in model.named_parameters():\n if 'encoder' in name:\n enc += param.nelement()\n else:\n dec += param.nelement()\n return enc + dec, enc, dec\n\n\ndef configure_process(opt, device_id):\n if device_id >= 0:\n torch.cuda.set_device(device_id)\n set_random_seed(opt.seed, device_id >= 0)\n\n\ndef main(opt, device_id):\n # NOTE: It's important that ``opt`` has been validated and updated\n # at this point.\n configure_process(opt, device_id)\n 
init_logger(opt.log_file)\n assert len(opt.accum_count) == len(opt.accum_steps), \\\n 'Number of accum_count values must match number of accum_steps'\n # Load checkpoint if we resume from a previous training.\n if opt.train_from:\n logger.info('Loading checkpoint from %s' % opt.train_from)\n checkpoint = torch.load(opt.train_from,\n map_location=lambda storage, loc: storage)\n\n model_opt = ArgumentParser.ckpt_model_opts(checkpoint[\"opt\"])\n ArgumentParser.update_model_opts(model_opt)\n ArgumentParser.validate_model_opts(model_opt)\n logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)\n vocab = checkpoint['vocab']\n else:\n checkpoint = None\n model_opt = opt\n vocab = torch.load(opt.data + '.vocab.pt')\n\n # check for code where vocab is saved instead of fields\n # (in the future this will be done in a smarter way)\n if old_style_vocab(vocab):\n fields = load_old_vocab(\n vocab, opt.model_type, dynamic_dict=opt.copy_attn)\n else:\n fields = vocab\n\n # Report src and tgt vocab sizes, including for features\n for side in ['src', 'tgt']:\n f = fields[side]\n try:\n f_iter = iter(f)\n except TypeError:\n f_iter = [(side, f)]\n for sn, sf in f_iter:\n if sf.use_vocab:\n logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))\n\n # Build model.\n model = build_model(model_opt, opt, fields, checkpoint)\n n_params, enc, dec = _tally_parameters(model)\n logger.info('encoder: %d' % enc)\n logger.info('decoder: %d' % dec)\n logger.info('* number of parameters: %d' % n_params)\n _check_save_model_path(opt)\n\n # Build optimizer.\n optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)\n\n # Build model saver\n model_saver = build_model_saver(model_opt, opt, model, fields, optim)\n\n trainer = build_trainer(\n opt, device_id, model, fields, optim, model_saver=model_saver)\n\n train_iterables = []\n if len(opt.data_ids) > 1:\n for train_id in opt.data_ids:\n shard_base = \"train_\" + train_id\n iterable = build_dataset_iter(shard_base, fields, opt, multi=True)\n train_iterables.append(iterable)\n train_iter = MultipleDatasetIterator(train_iterables, device_id, opt)\n else:\n train_iter = build_dataset_iter(\"train\", fields, opt)\n\n valid_iter = build_dataset_iter(\n \"valid\", fields, opt, is_train=False)\n\n if len(opt.gpu_ranks):\n logger.info('Starting training on GPU: %s' % opt.gpu_ranks)\n else:\n logger.info('Starting training on CPU, could be very slow')\n train_steps = opt.train_steps\n if opt.single_pass and train_steps > 0:\n logger.warning(\"Option single_pass is enabled, ignoring train_steps.\")\n train_steps = 0\n trainer.train(\n train_iter,\n train_steps,\n save_checkpoint_steps=opt.save_checkpoint_steps,\n valid_iter=valid_iter,\n valid_steps=opt.valid_steps)\n\n if opt.tensorboard:\n trainer.report_manager.tensorboard_writer.close()\n", "path": "onmt/train_single.py"}]} | 3,298 | 262 |
gh_patches_debug_20066 | rasdani/github-patches | git_diff | pulp__pulpcore-4682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
app entrypoint no longer supports --max-requests-jitter
**Version**
3.39
**Describe the bug**
--max-requests-jitter is not recognized
**To Reproduce**
Run the pulpcore-api entrypoint with --max-requests-jitter
**Expected behavior**
Accepts the argument
**Additional context**
Requested for Katello.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/entrypoint.py`
Content:
```
1 from contextvars import ContextVar
2 from logging import getLogger
3 import os
4 import socket
5
6 import click
7 import django
8 from django.conf import settings
9 from django.db import connection
10 from django.db.utils import InterfaceError, OperationalError
11 from gunicorn.workers.sync import SyncWorker
12 from gunicorn.app.base import BaseApplication
13
14 from pulpcore.app.apps import pulp_plugin_configs
15
16 logger = getLogger(__name__)
17
18
19 using_pulp_api_worker = ContextVar("using_pulp_api_worker", default=False)
20
21
22 class PulpApiWorker(SyncWorker):
23 def notify(self):
24 super().notify()
25 self.heartbeat()
26
27 def heartbeat(self):
28 try:
29 self.api_app_status, created = self.ApiAppStatus.objects.get_or_create(
30 name=self.name, defaults={"versions": self.versions}
31 )
32
33 if not created:
34 self.api_app_status.save_heartbeat()
35
36 if self.api_app_status.versions != self.versions:
37 self.api_app_status.versions = self.versions
38 self.api_app_status.save(update_fields=["versions"])
39
40 logger.debug(self.beat_msg)
41 except (InterfaceError, OperationalError):
42 connection.close_if_unusable_or_obsolete()
43 logger.info(self.fail_beat_msg)
44
45 def init_process(self):
46 os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pulpcore.app.settings")
47 django.setup()
48 from pulpcore.app.models import ApiAppStatus
49
50 if settings.API_APP_TTL < 2 * self.timeout:
51 logger.warn(
52 "API_APP_TTL (%s) is smaller than half the gunicorn timeout (%s). "
53 "You may experience workers wrongly reporting as missing",
54 settings.API_APP_TTL,
55 self.timeout,
56 )
57
58 self.ApiAppStatus = ApiAppStatus
59 self.api_app_status = None
60
61 self.name = "{pid}@{hostname}".format(pid=self.pid, hostname=socket.gethostname())
62 self.versions = {app.label: app.version for app in pulp_plugin_configs()}
63 self.beat_msg = (
64 "Api App '{name}' heartbeat written, sleeping for '{interarrival}' seconds".format(
65 name=self.name, interarrival=self.timeout
66 )
67 )
68 self.fail_beat_msg = (
69 "Api App '{name}' failed to write a heartbeat to the database, sleeping for "
70 "'{interarrival}' seconds."
71 ).format(name=self.name, interarrival=self.timeout)
72 super().init_process()
73
74 def run(self):
75 try:
76 super().run()
77 finally:
78 # cleanup
79 if self.api_app_status:
80 self.api_app_status.delete()
81
82
83 class PulpcoreApiApplication(BaseApplication):
84 def __init__(self, options):
85 self.options = options or {}
86 super().__init__()
87
88 def load_config(self):
89 [
90 self.cfg.set(key.lower(), value)
91 for key, value in self.options.items()
92 if value is not None
93 ]
94 self.cfg.set("default_proc_name", "pulpcore-api")
95 self.cfg.set("worker_class", PulpApiWorker.__module__ + "." + PulpApiWorker.__qualname__)
96
97 def load(self):
98 using_pulp_api_worker.set(True)
99
100 import pulpcore.app.wsgi
101
102 using_pulp_api_worker.set(False)
103 return pulpcore.app.wsgi.application
104
105
106 # Gunicorn options are adapted from:
107 # https://github.com/benoitc/gunicorn/blob/master/gunicorn/config.py
108
109
110 @click.option("--bind", "-b", default="[::]:24817")
111 @click.option("--workers", "-w", type=int)
112 # @click.option("--threads", "-w", type=int) # We don't use a threaded worker...
113 @click.option("--name", "-n", "proc_name")
114 @click.option("--timeout", "-t", type=int)
115 @click.option("--graceful-timeout", type=int)
116 @click.option("--keep-alive", "keepalive", type=int)
117 @click.option("--limit-request-line", type=int)
118 @click.option("--limit-request-fields", type=int)
119 @click.option("--limit-request-field-size", type=int)
120 @click.option("--max-requests", type=int)
121 @click.option("--access-logfile", "accesslog")
122 @click.option(
123 "--access-logformat",
124 "access_log_format",
125 default=(
126 "pulp [%({correlation-id}o)s]: "
127 '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"',
128 ),
129 )
130 @click.option("--error-logfile", "--log-file", "errorlog")
131 @click.option(
132 "--log-level", "loglevel", type=click.Choice(["debug", "info", "warning", "error", "critical"])
133 )
134 @click.option("--reload/--no-reload")
135 @click.option("--reload-engine", type=click.Choice(["auto", "poll", "inotify"]))
136 @click.option("--reload-extra-file", "reload_extra_files", multiple=True)
137 @click.option("--preload/--no-preload", "preload_app")
138 @click.option("--reuse-port/--no-reuse-port")
139 @click.option("--chdir")
140 @click.option("--user", "-u")
141 @click.option("--group", "-g")
142 @click.command()
143 def main(**options):
144 PulpcoreApiApplication(options).run()
145
```
Path: `pulpcore/content/entrypoint.py`
Content:
```
1 import click
2 from gunicorn.app.base import BaseApplication
3
4
5 class PulpcoreContentApplication(BaseApplication):
6 def __init__(self, options):
7 self.options = options or {}
8 super().__init__()
9
10 def load_config(self):
11 [
12 self.cfg.set(key.lower(), value)
13 for key, value in self.options.items()
14 if value is not None
15 ]
16 self.cfg.set("default_proc_name", "pulpcore-content")
17 self.cfg.set("worker_class", "aiohttp.GunicornWebWorker")
18
19 def load(self):
20 import pulpcore.content
21
22 return pulpcore.content.server
23
24
25 @click.option("--bind", "-b", default="[::]:24816")
26 @click.option("--workers", "-w", type=int)
27 # @click.option("--threads", "-w", type=int) # We don't use a threaded worker...
28 @click.option("--name", "-n", "proc_name")
29 @click.option("--timeout", "-t", type=int)
30 @click.option("--graceful-timeout", type=int)
31 @click.option("--keep-alive", "keepalive", type=int)
32 @click.option("--limit-request-line", type=int)
33 @click.option("--limit-request-fields", type=int)
34 @click.option("--limit-request-field-size", type=int)
35 @click.option("--max-requests", type=int)
36 @click.option("--access-logfile", "accesslog")
37 @click.option("--access-logformat", "access_log_format")
38 @click.option("--error-logfile", "--log-file", "errorlog")
39 @click.option(
40 "--log-level", "loglevel", type=click.Choice(["debug", "info", "warning", "error", "critical"])
41 )
42 @click.option("--reload/--no-reload")
43 @click.option("--reload-engine", type=click.Choice(["auto", "poll", "inotify"]))
44 @click.option("--reload-extra-file", "reload_extra_files", multiple=True)
45 @click.option("--preload/--no-preload", "preload_app")
46 @click.option("--reuse-port/--no-reuse-port")
47 @click.option("--chdir")
48 @click.option("--user", "-u")
49 @click.option("--group", "-g")
50 @click.command()
51 def main(**options):
52 PulpcoreContentApplication(options).run()
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/entrypoint.py b/pulpcore/app/entrypoint.py
--- a/pulpcore/app/entrypoint.py
+++ b/pulpcore/app/entrypoint.py
@@ -118,6 +118,7 @@
@click.option("--limit-request-fields", type=int)
@click.option("--limit-request-field-size", type=int)
@click.option("--max-requests", type=int)
[email protected]("--max-requests-jitter", type=int)
@click.option("--access-logfile", "accesslog")
@click.option(
"--access-logformat",
diff --git a/pulpcore/content/entrypoint.py b/pulpcore/content/entrypoint.py
--- a/pulpcore/content/entrypoint.py
+++ b/pulpcore/content/entrypoint.py
@@ -33,6 +33,7 @@
@click.option("--limit-request-fields", type=int)
@click.option("--limit-request-field-size", type=int)
@click.option("--max-requests", type=int)
[email protected]("--max-requests-jitter", type=int)
@click.option("--access-logfile", "accesslog")
@click.option("--access-logformat", "access_log_format")
@click.option("--error-logfile", "--log-file", "errorlog")
| {"golden_diff": "diff --git a/pulpcore/app/entrypoint.py b/pulpcore/app/entrypoint.py\n--- a/pulpcore/app/entrypoint.py\n+++ b/pulpcore/app/entrypoint.py\n@@ -118,6 +118,7 @@\n @click.option(\"--limit-request-fields\", type=int)\n @click.option(\"--limit-request-field-size\", type=int)\n @click.option(\"--max-requests\", type=int)\[email protected](\"--max-requests-jitter\", type=int)\n @click.option(\"--access-logfile\", \"accesslog\")\n @click.option(\n \"--access-logformat\",\ndiff --git a/pulpcore/content/entrypoint.py b/pulpcore/content/entrypoint.py\n--- a/pulpcore/content/entrypoint.py\n+++ b/pulpcore/content/entrypoint.py\n@@ -33,6 +33,7 @@\n @click.option(\"--limit-request-fields\", type=int)\n @click.option(\"--limit-request-field-size\", type=int)\n @click.option(\"--max-requests\", type=int)\[email protected](\"--max-requests-jitter\", type=int)\n @click.option(\"--access-logfile\", \"accesslog\")\n @click.option(\"--access-logformat\", \"access_log_format\")\n @click.option(\"--error-logfile\", \"--log-file\", \"errorlog\")\n", "issue": "app entrypoint no longer supports --max-requests-jitter\n**Version**\r\n3.39\r\n\r\n**Describe the bug**\r\n--max-requests-jitter is not recognized\r\n\r\n**To Reproduce**\r\nRun the pulpcore-api entrypoint with --max-requests-jitter\r\n\r\n**Expected behavior**\r\nAccepts the argument\r\n\r\n**Additional context**\r\nRequested for Katello.\r\n\n", "before_files": [{"content": "from contextvars import ContextVar\nfrom logging import getLogger\nimport os\nimport socket\n\nimport click\nimport django\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.db.utils import InterfaceError, OperationalError\nfrom gunicorn.workers.sync import SyncWorker\nfrom gunicorn.app.base import BaseApplication\n\nfrom pulpcore.app.apps import pulp_plugin_configs\n\nlogger = getLogger(__name__)\n\n\nusing_pulp_api_worker = ContextVar(\"using_pulp_api_worker\", default=False)\n\n\nclass PulpApiWorker(SyncWorker):\n def notify(self):\n super().notify()\n self.heartbeat()\n\n def heartbeat(self):\n try:\n self.api_app_status, created = self.ApiAppStatus.objects.get_or_create(\n name=self.name, defaults={\"versions\": self.versions}\n )\n\n if not created:\n self.api_app_status.save_heartbeat()\n\n if self.api_app_status.versions != self.versions:\n self.api_app_status.versions = self.versions\n self.api_app_status.save(update_fields=[\"versions\"])\n\n logger.debug(self.beat_msg)\n except (InterfaceError, OperationalError):\n connection.close_if_unusable_or_obsolete()\n logger.info(self.fail_beat_msg)\n\n def init_process(self):\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"pulpcore.app.settings\")\n django.setup()\n from pulpcore.app.models import ApiAppStatus\n\n if settings.API_APP_TTL < 2 * self.timeout:\n logger.warn(\n \"API_APP_TTL (%s) is smaller than half the gunicorn timeout (%s). 
\"\n \"You may experience workers wrongly reporting as missing\",\n settings.API_APP_TTL,\n self.timeout,\n )\n\n self.ApiAppStatus = ApiAppStatus\n self.api_app_status = None\n\n self.name = \"{pid}@{hostname}\".format(pid=self.pid, hostname=socket.gethostname())\n self.versions = {app.label: app.version for app in pulp_plugin_configs()}\n self.beat_msg = (\n \"Api App '{name}' heartbeat written, sleeping for '{interarrival}' seconds\".format(\n name=self.name, interarrival=self.timeout\n )\n )\n self.fail_beat_msg = (\n \"Api App '{name}' failed to write a heartbeat to the database, sleeping for \"\n \"'{interarrival}' seconds.\"\n ).format(name=self.name, interarrival=self.timeout)\n super().init_process()\n\n def run(self):\n try:\n super().run()\n finally:\n # cleanup\n if self.api_app_status:\n self.api_app_status.delete()\n\n\nclass PulpcoreApiApplication(BaseApplication):\n def __init__(self, options):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n [\n self.cfg.set(key.lower(), value)\n for key, value in self.options.items()\n if value is not None\n ]\n self.cfg.set(\"default_proc_name\", \"pulpcore-api\")\n self.cfg.set(\"worker_class\", PulpApiWorker.__module__ + \".\" + PulpApiWorker.__qualname__)\n\n def load(self):\n using_pulp_api_worker.set(True)\n\n import pulpcore.app.wsgi\n\n using_pulp_api_worker.set(False)\n return pulpcore.app.wsgi.application\n\n\n# Gunicorn options are adapted from:\n# https://github.com/benoitc/gunicorn/blob/master/gunicorn/config.py\n\n\[email protected](\"--bind\", \"-b\", default=\"[::]:24817\")\[email protected](\"--workers\", \"-w\", type=int)\n# @click.option(\"--threads\", \"-w\", type=int) # We don't use a threaded worker...\[email protected](\"--name\", \"-n\", \"proc_name\")\[email protected](\"--timeout\", \"-t\", type=int)\[email protected](\"--graceful-timeout\", type=int)\[email protected](\"--keep-alive\", \"keepalive\", type=int)\[email protected](\"--limit-request-line\", type=int)\[email protected](\"--limit-request-fields\", type=int)\[email protected](\"--limit-request-field-size\", type=int)\[email protected](\"--max-requests\", type=int)\[email protected](\"--access-logfile\", \"accesslog\")\[email protected](\n \"--access-logformat\",\n \"access_log_format\",\n default=(\n \"pulp [%({correlation-id}o)s]: \"\n '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\" \"%(a)s\"',\n ),\n)\[email protected](\"--error-logfile\", \"--log-file\", \"errorlog\")\[email protected](\n \"--log-level\", \"loglevel\", type=click.Choice([\"debug\", \"info\", \"warning\", \"error\", \"critical\"])\n)\[email protected](\"--reload/--no-reload\")\[email protected](\"--reload-engine\", type=click.Choice([\"auto\", \"poll\", \"inotify\"]))\[email protected](\"--reload-extra-file\", \"reload_extra_files\", multiple=True)\[email protected](\"--preload/--no-preload\", \"preload_app\")\[email protected](\"--reuse-port/--no-reuse-port\")\[email protected](\"--chdir\")\[email protected](\"--user\", \"-u\")\[email protected](\"--group\", \"-g\")\[email protected]()\ndef main(**options):\n PulpcoreApiApplication(options).run()\n", "path": "pulpcore/app/entrypoint.py"}, {"content": "import click\nfrom gunicorn.app.base import BaseApplication\n\n\nclass PulpcoreContentApplication(BaseApplication):\n def __init__(self, options):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n [\n self.cfg.set(key.lower(), value)\n for key, value in self.options.items()\n if value is not None\n ]\n 
self.cfg.set(\"default_proc_name\", \"pulpcore-content\")\n self.cfg.set(\"worker_class\", \"aiohttp.GunicornWebWorker\")\n\n def load(self):\n import pulpcore.content\n\n return pulpcore.content.server\n\n\[email protected](\"--bind\", \"-b\", default=\"[::]:24816\")\[email protected](\"--workers\", \"-w\", type=int)\n# @click.option(\"--threads\", \"-w\", type=int) # We don't use a threaded worker...\[email protected](\"--name\", \"-n\", \"proc_name\")\[email protected](\"--timeout\", \"-t\", type=int)\[email protected](\"--graceful-timeout\", type=int)\[email protected](\"--keep-alive\", \"keepalive\", type=int)\[email protected](\"--limit-request-line\", type=int)\[email protected](\"--limit-request-fields\", type=int)\[email protected](\"--limit-request-field-size\", type=int)\[email protected](\"--max-requests\", type=int)\[email protected](\"--access-logfile\", \"accesslog\")\[email protected](\"--access-logformat\", \"access_log_format\")\[email protected](\"--error-logfile\", \"--log-file\", \"errorlog\")\[email protected](\n \"--log-level\", \"loglevel\", type=click.Choice([\"debug\", \"info\", \"warning\", \"error\", \"critical\"])\n)\[email protected](\"--reload/--no-reload\")\[email protected](\"--reload-engine\", type=click.Choice([\"auto\", \"poll\", \"inotify\"]))\[email protected](\"--reload-extra-file\", \"reload_extra_files\", multiple=True)\[email protected](\"--preload/--no-preload\", \"preload_app\")\[email protected](\"--reuse-port/--no-reuse-port\")\[email protected](\"--chdir\")\[email protected](\"--user\", \"-u\")\[email protected](\"--group\", \"-g\")\[email protected]()\ndef main(**options):\n PulpcoreContentApplication(options).run()\n", "path": "pulpcore/content/entrypoint.py"}], "after_files": [{"content": "from contextvars import ContextVar\nfrom logging import getLogger\nimport os\nimport socket\n\nimport click\nimport django\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.db.utils import InterfaceError, OperationalError\nfrom gunicorn.workers.sync import SyncWorker\nfrom gunicorn.app.base import BaseApplication\n\nfrom pulpcore.app.apps import pulp_plugin_configs\n\nlogger = getLogger(__name__)\n\n\nusing_pulp_api_worker = ContextVar(\"using_pulp_api_worker\", default=False)\n\n\nclass PulpApiWorker(SyncWorker):\n def notify(self):\n super().notify()\n self.heartbeat()\n\n def heartbeat(self):\n try:\n self.api_app_status, created = self.ApiAppStatus.objects.get_or_create(\n name=self.name, defaults={\"versions\": self.versions}\n )\n\n if not created:\n self.api_app_status.save_heartbeat()\n\n if self.api_app_status.versions != self.versions:\n self.api_app_status.versions = self.versions\n self.api_app_status.save(update_fields=[\"versions\"])\n\n logger.debug(self.beat_msg)\n except (InterfaceError, OperationalError):\n connection.close_if_unusable_or_obsolete()\n logger.info(self.fail_beat_msg)\n\n def init_process(self):\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"pulpcore.app.settings\")\n django.setup()\n from pulpcore.app.models import ApiAppStatus\n\n if settings.API_APP_TTL < 2 * self.timeout:\n logger.warn(\n \"API_APP_TTL (%s) is smaller than half the gunicorn timeout (%s). 
\"\n \"You may experience workers wrongly reporting as missing\",\n settings.API_APP_TTL,\n self.timeout,\n )\n\n self.ApiAppStatus = ApiAppStatus\n self.api_app_status = None\n\n self.name = \"{pid}@{hostname}\".format(pid=self.pid, hostname=socket.gethostname())\n self.versions = {app.label: app.version for app in pulp_plugin_configs()}\n self.beat_msg = (\n \"Api App '{name}' heartbeat written, sleeping for '{interarrival}' seconds\".format(\n name=self.name, interarrival=self.timeout\n )\n )\n self.fail_beat_msg = (\n \"Api App '{name}' failed to write a heartbeat to the database, sleeping for \"\n \"'{interarrival}' seconds.\"\n ).format(name=self.name, interarrival=self.timeout)\n super().init_process()\n\n def run(self):\n try:\n super().run()\n finally:\n # cleanup\n if self.api_app_status:\n self.api_app_status.delete()\n\n\nclass PulpcoreApiApplication(BaseApplication):\n def __init__(self, options):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n [\n self.cfg.set(key.lower(), value)\n for key, value in self.options.items()\n if value is not None\n ]\n self.cfg.set(\"default_proc_name\", \"pulpcore-api\")\n self.cfg.set(\"worker_class\", PulpApiWorker.__module__ + \".\" + PulpApiWorker.__qualname__)\n\n def load(self):\n using_pulp_api_worker.set(True)\n\n import pulpcore.app.wsgi\n\n using_pulp_api_worker.set(False)\n return pulpcore.app.wsgi.application\n\n\n# Gunicorn options are adapted from:\n# https://github.com/benoitc/gunicorn/blob/master/gunicorn/config.py\n\n\[email protected](\"--bind\", \"-b\", default=\"[::]:24817\")\[email protected](\"--workers\", \"-w\", type=int)\n# @click.option(\"--threads\", \"-w\", type=int) # We don't use a threaded worker...\[email protected](\"--name\", \"-n\", \"proc_name\")\[email protected](\"--timeout\", \"-t\", type=int)\[email protected](\"--graceful-timeout\", type=int)\[email protected](\"--keep-alive\", \"keepalive\", type=int)\[email protected](\"--limit-request-line\", type=int)\[email protected](\"--limit-request-fields\", type=int)\[email protected](\"--limit-request-field-size\", type=int)\[email protected](\"--max-requests\", type=int)\[email protected](\"--max-requests-jitter\", type=int)\[email protected](\"--access-logfile\", \"accesslog\")\[email protected](\n \"--access-logformat\",\n \"access_log_format\",\n default=(\n \"pulp [%({correlation-id}o)s]: \"\n '%(h)s %(l)s %(u)s %(t)s \"%(r)s\" %(s)s %(b)s \"%(f)s\" \"%(a)s\"',\n ),\n)\[email protected](\"--error-logfile\", \"--log-file\", \"errorlog\")\[email protected](\n \"--log-level\", \"loglevel\", type=click.Choice([\"debug\", \"info\", \"warning\", \"error\", \"critical\"])\n)\[email protected](\"--reload/--no-reload\")\[email protected](\"--reload-engine\", type=click.Choice([\"auto\", \"poll\", \"inotify\"]))\[email protected](\"--reload-extra-file\", \"reload_extra_files\", multiple=True)\[email protected](\"--preload/--no-preload\", \"preload_app\")\[email protected](\"--reuse-port/--no-reuse-port\")\[email protected](\"--chdir\")\[email protected](\"--user\", \"-u\")\[email protected](\"--group\", \"-g\")\[email protected]()\ndef main(**options):\n PulpcoreApiApplication(options).run()\n", "path": "pulpcore/app/entrypoint.py"}, {"content": "import click\nfrom gunicorn.app.base import BaseApplication\n\n\nclass PulpcoreContentApplication(BaseApplication):\n def __init__(self, options):\n self.options = options or {}\n super().__init__()\n\n def load_config(self):\n [\n self.cfg.set(key.lower(), value)\n for key, value in 
self.options.items()\n if value is not None\n ]\n self.cfg.set(\"default_proc_name\", \"pulpcore-content\")\n self.cfg.set(\"worker_class\", \"aiohttp.GunicornWebWorker\")\n\n def load(self):\n import pulpcore.content\n\n return pulpcore.content.server\n\n\[email protected](\"--bind\", \"-b\", default=\"[::]:24816\")\[email protected](\"--workers\", \"-w\", type=int)\n# @click.option(\"--threads\", \"-w\", type=int) # We don't use a threaded worker...\[email protected](\"--name\", \"-n\", \"proc_name\")\[email protected](\"--timeout\", \"-t\", type=int)\[email protected](\"--graceful-timeout\", type=int)\[email protected](\"--keep-alive\", \"keepalive\", type=int)\[email protected](\"--limit-request-line\", type=int)\[email protected](\"--limit-request-fields\", type=int)\[email protected](\"--limit-request-field-size\", type=int)\[email protected](\"--max-requests\", type=int)\[email protected](\"--max-requests-jitter\", type=int)\[email protected](\"--access-logfile\", \"accesslog\")\[email protected](\"--access-logformat\", \"access_log_format\")\[email protected](\"--error-logfile\", \"--log-file\", \"errorlog\")\[email protected](\n \"--log-level\", \"loglevel\", type=click.Choice([\"debug\", \"info\", \"warning\", \"error\", \"critical\"])\n)\[email protected](\"--reload/--no-reload\")\[email protected](\"--reload-engine\", type=click.Choice([\"auto\", \"poll\", \"inotify\"]))\[email protected](\"--reload-extra-file\", \"reload_extra_files\", multiple=True)\[email protected](\"--preload/--no-preload\", \"preload_app\")\[email protected](\"--reuse-port/--no-reuse-port\")\[email protected](\"--chdir\")\[email protected](\"--user\", \"-u\")\[email protected](\"--group\", \"-g\")\[email protected]()\ndef main(**options):\n PulpcoreContentApplication(options).run()\n", "path": "pulpcore/content/entrypoint.py"}]} | 2,399 | 273 |
gh_patches_debug_864 | rasdani/github-patches | git_diff | ivy-llc__ivy-28478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix Frontend Failing Test: jax - manipulation.paddle.tile
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/paddle/manipulation.py`
Content:
```
1 # global
2 import ivy
3 from ivy.functional.frontends.paddle.func_wrapper import (
4 to_ivy_arrays_and_back,
5 )
6 from ivy.func_wrapper import (
7 with_unsupported_dtypes,
8 with_supported_dtypes,
9 with_supported_device_and_dtypes,
10 )
11
12
13 @with_unsupported_dtypes({"2.6.0 and below": ("float16", "bfloat16")}, "paddle")
14 @to_ivy_arrays_and_back
15 def abs(x, name=None):
16 return ivy.abs(x)
17
18
19 @with_supported_dtypes(
20 {"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64")},
21 "paddle",
22 )
23 @to_ivy_arrays_and_back
24 def broadcast_to(x, shape, name=None):
25 return ivy.broadcast_to(x, shape)
26
27
28 @with_supported_dtypes(
29 {
30 "2.6.0 and below": (
31 "bool",
32 "float16",
33 "float32",
34 "float64",
35 "int32",
36 "int64",
37 "uint8",
38 )
39 },
40 "paddle",
41 )
42 @to_ivy_arrays_and_back
43 def cast(x, dtype):
44 return ivy.astype(x, dtype)
45
46
47 @with_unsupported_dtypes({"2.6.0 and below": ("int8", "int16")}, "paddle")
48 @to_ivy_arrays_and_back
49 def concat(x, axis, name=None):
50 return ivy.concat(x, axis=axis)
51
52
53 @with_supported_dtypes(
54 {"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64")},
55 "paddle",
56 )
57 @to_ivy_arrays_and_back
58 def expand(x, shape, name=None):
59 return ivy.expand(x, shape)
60
61
62 @with_unsupported_dtypes(
63 {"2.6.0 and below": ("int8", "uint8", "int16", "float16")},
64 "paddle",
65 )
66 @to_ivy_arrays_and_back
67 def flip(x, axis, name=None):
68 return ivy.flip(x, axis=axis)
69
70
71 @with_supported_dtypes(
72 {"2.6.0 and below": ("bool", "float32", "float64", "int32", "int64")},
73 "paddle",
74 )
75 @to_ivy_arrays_and_back
76 def gather(params, indices, axis=-1, batch_dims=0, name=None):
77 return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)
78
79
80 @with_unsupported_dtypes(
81 {"2.6.0 and below": ("int8", "uint8", "int16", "uint16", "float16", "bfloat16")},
82 "paddle",
83 )
84 @to_ivy_arrays_and_back
85 def gather_nd(x, index, name=None):
86 return ivy.gather_nd(x, index)
87
88
89 @with_supported_dtypes(
90 {"2.6.0 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
91 "paddle",
92 )
93 @to_ivy_arrays_and_back
94 def index_add(x, index, axis, value, *, name=None):
95 x = ivy.swapaxes(x, axis, 0)
96 value = ivy.swapaxes(value, axis, 0)
97 _to_adds = []
98 index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda i: i[0]))
99 while index:
100 _curr_idx = index[0][0]
101 while len(_to_adds) < _curr_idx:
102 _to_adds.append(ivy.zeros_like(value[0]))
103 _to_add_cum = ivy.get_item(value, index[0][1])
104 while (len(index)) > 1 and (index[0][0] == index[1][0]):
105 _to_add_cum = _to_add_cum + ivy.get_item(value, index.pop(1)[1])
106 index.pop(0)
107 _to_adds.append(_to_add_cum)
108 while len(_to_adds) < x.shape[0]:
109 _to_adds.append(ivy.zeros_like(value[0]))
110 _to_adds = ivy.stack(_to_adds)
111 if len(x.shape) < 2:
112 # Added this line due to the paddle backend treating scalars as 1-d arrays
113 _to_adds = ivy.flatten(_to_adds)
114
115 ret = ivy.add(x, _to_adds)
116 ret = ivy.swapaxes(ret, axis, 0)
117 return ret
118
119
120 @to_ivy_arrays_and_back
121 def put_along_axis(arr, indices, values, axis, reduce="assign"):
122 result = ivy.put_along_axis(arr, indices, values, axis)
123 return result
124
125
126 @with_supported_dtypes(
127 {"2.6.0 and below": ("int32", "int64", "float32", "float64")},
128 "paddle",
129 )
130 @to_ivy_arrays_and_back
131 def repeat_interleave(x, repeats, axis=None, name=None):
132 return ivy.repeat(x, repeats, axis=axis)
133
134
135 @to_ivy_arrays_and_back
136 def reshape(x, shape, name=None):
137 return ivy.reshape(x, shape)
138
139
140 @with_supported_dtypes(
141 {
142 "2.5.0 and below": (
143 "float32",
144 "float64",
145 "int32",
146 "int64",
147 "complex64",
148 "complex128",
149 )
150 },
151 "paddle",
152 )
153 @to_ivy_arrays_and_back
154 def roll(x, shifts, axis=None, name=None):
155 return ivy.roll(x, shifts, axis=axis)
156
157
158 @with_supported_device_and_dtypes(
159 {
160 "2.6.0 and above": {
161 "cpu": (
162 "bool",
163 "int32",
164 "int64",
165 "float32",
166 "float64",
167 ),
168 "gpu": ("float16",),
169 },
170 },
171 "paddle",
172 )
173 @to_ivy_arrays_and_back
174 def rot90(x, k=1, axes=(0, 1), name=None):
175 return ivy.rot90(x, k=k, axes=axes)
176
177
178 @with_unsupported_dtypes(
179 {"2.6.0 and below": ("int16", "complex64", "complex128")},
180 "paddle",
181 )
182 @to_ivy_arrays_and_back
183 def split(x, num_or_sections, axis=0, name=None):
184 return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)
185
186
187 @with_unsupported_dtypes(
188 {"2.6.0 and below": ("float16", "bfloat16", "int8", "int16")},
189 "paddle",
190 )
191 @to_ivy_arrays_and_back
192 def squeeze(x, axis=None, name=None):
193 return ivy.squeeze(x, axis=axis)
194
195
196 @to_ivy_arrays_and_back
197 def stack(x, axis=0, name=None):
198 return ivy.stack(x, axis=axis)
199
200
201 @with_supported_dtypes(
202 {"2.6.0 and below": ("float32", "float64")},
203 "paddle",
204 )
205 @to_ivy_arrays_and_back
206 def take_along_axis(arr, indices, axis):
207 return ivy.take_along_axis(arr, indices, axis)
208
209
210 @with_unsupported_dtypes(
211 {"2.6.0 and below": ("int8", "uint8", "int16", "float16")},
212 "paddle",
213 )
214 @to_ivy_arrays_and_back
215 def tile(x, repeat_times, name=None):
216 return ivy.tile(x, repeats=repeat_times)
217
218
219 @to_ivy_arrays_and_back
220 def tolist(x):
221 return ivy.to_list(x)
222
223
224 @with_supported_dtypes(
225 {"2.6.0 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
226 "paddle",
227 )
228 @to_ivy_arrays_and_back
229 def unbind(input, axis=0):
230 shape = list(input.shape)
231 num_splits = shape[axis]
232 shape.pop(axis)
233 return tuple(x.reshape(tuple(shape)) for x in split(input, num_splits, axis=axis))
234
235
236 @with_supported_dtypes(
237 {"2.6.0 and below": ("bool", "int32", "int64", "float16", "float32", "float64")},
238 "paddle",
239 )
240 @to_ivy_arrays_and_back
241 def unique_consecutive(x, axis=0):
242 return ivy.unique_consecutive(x, axis=axis)
243
244
245 @with_supported_dtypes(
246 {
247 "2.6.0 and below": (
248 "float32",
249 "float64",
250 "int32",
251 "int64",
252 )
253 },
254 "paddle",
255 )
256 @to_ivy_arrays_and_back
257 def unstack(x, axis=0, name=None):
258 return ivy.unstack(x, axis=axis)
259
260
261 absolute = abs
262
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/paddle/manipulation.py b/ivy/functional/frontends/paddle/manipulation.py
--- a/ivy/functional/frontends/paddle/manipulation.py
+++ b/ivy/functional/frontends/paddle/manipulation.py
@@ -208,7 +208,7 @@
@with_unsupported_dtypes(
- {"2.6.0 and below": ("int8", "uint8", "int16", "float16")},
+ {"2.6.0 and below": ("int8", "uint8", "int16", "float16", "bfloat16")},
"paddle",
)
@to_ivy_arrays_and_back
| {"golden_diff": "diff --git a/ivy/functional/frontends/paddle/manipulation.py b/ivy/functional/frontends/paddle/manipulation.py\n--- a/ivy/functional/frontends/paddle/manipulation.py\n+++ b/ivy/functional/frontends/paddle/manipulation.py\n@@ -208,7 +208,7 @@\n \n \n @with_unsupported_dtypes(\n- {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n+ {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\", \"bfloat16\")},\n \"paddle\",\n )\n @to_ivy_arrays_and_back\n", "issue": "Fix Frontend Failing Test: jax - manipulation.paddle.tile\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n with_supported_device_and_dtypes,\n)\n\n\n@with_unsupported_dtypes({\"2.6.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef broadcast_to(x, shape, name=None):\n return ivy.broadcast_to(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.6.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n\n\n@with_unsupported_dtypes({\"2.6.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef expand(x, shape, name=None):\n return ivy.expand(x, shape)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef flip(x, axis, name=None):\n return ivy.flip(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather(params, indices, axis=-1, batch_dims=0, name=None):\n return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"uint16\", \"float16\", \"bfloat16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather_nd(x, index, name=None):\n return ivy.gather_nd(x, index)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef index_add(x, index, axis, value, *, name=None):\n x = ivy.swapaxes(x, axis, 0)\n value = ivy.swapaxes(value, axis, 0)\n _to_adds = []\n index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda i: i[0]))\n while index:\n _curr_idx = index[0][0]\n while len(_to_adds) < _curr_idx:\n _to_adds.append(ivy.zeros_like(value[0]))\n _to_add_cum = ivy.get_item(value, index[0][1])\n while (len(index)) > 1 and (index[0][0] == index[1][0]):\n _to_add_cum = _to_add_cum + ivy.get_item(value, index.pop(1)[1])\n index.pop(0)\n _to_adds.append(_to_add_cum)\n while len(_to_adds) < x.shape[0]:\n _to_adds.append(ivy.zeros_like(value[0]))\n _to_adds = ivy.stack(_to_adds)\n if len(x.shape) < 2:\n # Added this line due to the paddle backend treating 
scalars as 1-d arrays\n _to_adds = ivy.flatten(_to_adds)\n\n ret = ivy.add(x, _to_adds)\n ret = ivy.swapaxes(ret, axis, 0)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef put_along_axis(arr, indices, values, axis, reduce=\"assign\"):\n result = ivy.put_along_axis(arr, indices, values, axis)\n return result\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef repeat_interleave(x, repeats, axis=None, name=None):\n return ivy.repeat(x, repeats, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape, name=None):\n return ivy.reshape(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef roll(x, shifts, axis=None, name=None):\n return ivy.roll(x, shifts, axis=axis)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.6.0 and above\": {\n \"cpu\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\"float16\",),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rot90(x, k=1, axes=(0, 1), name=None):\n return ivy.rot90(x, k=k, axes=axes)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis):\n return ivy.take_along_axis(arr, indices, axis)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@to_ivy_arrays_and_back\ndef tolist(x):\n return ivy.to_list(x)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unbind(input, axis=0):\n shape = list(input.shape)\n num_splits = shape[axis]\n shape.pop(axis)\n return tuple(x.reshape(tuple(shape)) for x in split(input, num_splits, axis=axis))\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unique_consecutive(x, axis=0):\n return ivy.unique_consecutive(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.6.0 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unstack(x, axis=0, name=None):\n return ivy.unstack(x, axis=axis)\n\n\nabsolute = abs\n", "path": "ivy/functional/frontends/paddle/manipulation.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\nfrom ivy.func_wrapper import (\n with_unsupported_dtypes,\n with_supported_dtypes,\n 
with_supported_device_and_dtypes,\n)\n\n\n@with_unsupported_dtypes({\"2.6.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef broadcast_to(x, shape, name=None):\n return ivy.broadcast_to(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.6.0 and below\": (\n \"bool\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"uint8\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cast(x, dtype):\n return ivy.astype(x, dtype)\n\n\n@with_unsupported_dtypes({\"2.6.0 and below\": (\"int8\", \"int16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef concat(x, axis, name=None):\n return ivy.concat(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef expand(x, shape, name=None):\n return ivy.expand(x, shape)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef flip(x, axis, name=None):\n return ivy.flip(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"float32\", \"float64\", \"int32\", \"int64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather(params, indices, axis=-1, batch_dims=0, name=None):\n return ivy.gather(params, indices, axis=axis, batch_dims=batch_dims)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"uint16\", \"float16\", \"bfloat16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef gather_nd(x, index, name=None):\n return ivy.gather_nd(x, index)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef index_add(x, index, axis, value, *, name=None):\n x = ivy.swapaxes(x, axis, 0)\n value = ivy.swapaxes(value, axis, 0)\n _to_adds = []\n index = sorted(zip(ivy.to_list(index), range(len(index))), key=(lambda i: i[0]))\n while index:\n _curr_idx = index[0][0]\n while len(_to_adds) < _curr_idx:\n _to_adds.append(ivy.zeros_like(value[0]))\n _to_add_cum = ivy.get_item(value, index[0][1])\n while (len(index)) > 1 and (index[0][0] == index[1][0]):\n _to_add_cum = _to_add_cum + ivy.get_item(value, index.pop(1)[1])\n index.pop(0)\n _to_adds.append(_to_add_cum)\n while len(_to_adds) < x.shape[0]:\n _to_adds.append(ivy.zeros_like(value[0]))\n _to_adds = ivy.stack(_to_adds)\n if len(x.shape) < 2:\n # Added this line due to the paddle backend treating scalars as 1-d arrays\n _to_adds = ivy.flatten(_to_adds)\n\n ret = ivy.add(x, _to_adds)\n ret = ivy.swapaxes(ret, axis, 0)\n return ret\n\n\n@to_ivy_arrays_and_back\ndef put_along_axis(arr, indices, values, axis, reduce=\"assign\"):\n result = ivy.put_along_axis(arr, indices, values, axis)\n return result\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"int32\", \"int64\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef repeat_interleave(x, repeats, axis=None, name=None):\n return ivy.repeat(x, repeats, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef reshape(x, shape, name=None):\n return ivy.reshape(x, shape)\n\n\n@with_supported_dtypes(\n {\n \"2.5.0 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n 
},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef roll(x, shifts, axis=None, name=None):\n return ivy.roll(x, shifts, axis=axis)\n\n\n@with_supported_device_and_dtypes(\n {\n \"2.6.0 and above\": {\n \"cpu\": (\n \"bool\",\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n ),\n \"gpu\": (\"float16\",),\n },\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef rot90(x, k=1, axes=(0, 1), name=None):\n return ivy.rot90(x, k=k, axes=axes)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int16\", \"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef split(x, num_or_sections, axis=0, name=None):\n return ivy.split(x, num_or_size_splits=num_or_sections, axis=axis)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"float16\", \"bfloat16\", \"int8\", \"int16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef squeeze(x, axis=None, name=None):\n return ivy.squeeze(x, axis=axis)\n\n\n@to_ivy_arrays_and_back\ndef stack(x, axis=0, name=None):\n return ivy.stack(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef take_along_axis(arr, indices, axis):\n return ivy.take_along_axis(arr, indices, axis)\n\n\n@with_unsupported_dtypes(\n {\"2.6.0 and below\": (\"int8\", \"uint8\", \"int16\", \"float16\", \"bfloat16\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef tile(x, repeat_times, name=None):\n return ivy.tile(x, repeats=repeat_times)\n\n\n@to_ivy_arrays_and_back\ndef tolist(x):\n return ivy.to_list(x)\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unbind(input, axis=0):\n shape = list(input.shape)\n num_splits = shape[axis]\n shape.pop(axis)\n return tuple(x.reshape(tuple(shape)) for x in split(input, num_splits, axis=axis))\n\n\n@with_supported_dtypes(\n {\"2.6.0 and below\": (\"bool\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unique_consecutive(x, axis=0):\n return ivy.unique_consecutive(x, axis=axis)\n\n\n@with_supported_dtypes(\n {\n \"2.6.0 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef unstack(x, axis=0, name=None):\n return ivy.unstack(x, axis=axis)\n\n\nabsolute = abs\n", "path": "ivy/functional/frontends/paddle/manipulation.py"}]} | 3,007 | 160 |
gh_patches_debug_7231 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-797 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Increase the usage of augmented assignment statements
This issue corresponds with [this one](https://github.com/open-telemetry/opentelemetry-python/issues/2258).
--- END ISSUE ---
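For readers unfamiliar with the term, the sketch below shows the rewrite the issue is asking for; it is an illustration only, and the function and variable names are invented rather than taken from the repository.
```python
# Equivalent ways to append a query string to a URL. The issue simply
# asks that the second, augmented-assignment spelling be preferred.
def build_url_plain(base: str, query: str) -> str:
    url = base
    url = url + "?" + query   # plain assignment, repeats `url`
    return url


def build_url_augmented(base: str, query: str) -> str:
    url = base
    url += "?" + query        # augmented assignment
    return url


assert build_url_plain("http://x", "a=1") == build_url_augmented("http://x", "a=1")
```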
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 The opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used
17 on any ASGI framework (such as Django-channels / Quart) to track requests
18 timing through OpenTelemetry.
19
20 Usage (Quart)
21 -------------
22
23 .. code-block:: python
24
25 from quart import Quart
26 from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
27
28 app = Quart(__name__)
29 app.asgi_app = OpenTelemetryMiddleware(app.asgi_app)
30
31 @app.route("/")
32 async def hello():
33 return "Hello!"
34
35 if __name__ == "__main__":
36 app.run(debug=True)
37
38
39 Usage (Django 3.0)
40 ------------------
41
42 Modify the application's ``asgi.py`` file as shown below.
43
44 .. code-block:: python
45
46 import os
47 from django.core.asgi import get_asgi_application
48 from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
49
50 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'asgi_example.settings')
51
52 application = get_asgi_application()
53 application = OpenTelemetryMiddleware(application)
54
55
56 Usage (Raw ASGI)
57 ----------------
58
59 .. code-block:: python
60
61 from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware
62
63 app = ... # An ASGI application.
64 app = OpenTelemetryMiddleware(app)
65
66
67 Configuration
68 -------------
69
70 Request/Response hooks
71 **********************
72
73 Utilize request/reponse hooks to execute custom logic to be performed before/after performing a request. The server request hook takes in a server span and ASGI
74 scope object for every incoming request. The client request hook is called with the internal span and an ASGI scope which is sent as a dictionary for when the method recieve is called.
75 The client response hook is called with the internal span and an ASGI event which is sent as a dictionary for when the method send is called.
76
77 .. code-block:: python
78
79 def server_request_hook(span: Span, scope: dict):
80 if span and span.is_recording():
81 span.set_attribute("custom_user_attribute_from_request_hook", "some-value")
82
83 def client_request_hook(span: Span, scope: dict):
84 if span and span.is_recording():
85 span.set_attribute("custom_user_attribute_from_client_request_hook", "some-value")
86
87 def client_response_hook(span: Span, message: dict):
88 if span and span.is_recording():
89 span.set_attribute("custom_user_attribute_from_response_hook", "some-value")
90
91 OpenTelemetryMiddleware().(application, server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook)
92
93 API
94 ---
95 """
96
97 import typing
98 import urllib
99 from functools import wraps
100 from typing import Tuple
101
102 from asgiref.compatibility import guarantee_single_callable
103
104 from opentelemetry import context, trace
105 from opentelemetry.instrumentation.asgi.version import __version__ # noqa
106 from opentelemetry.instrumentation.utils import http_status_to_status_code
107 from opentelemetry.propagate import extract
108 from opentelemetry.propagators.textmap import Getter
109 from opentelemetry.semconv.trace import SpanAttributes
110 from opentelemetry.trace import Span
111 from opentelemetry.trace.status import Status, StatusCode
112 from opentelemetry.util.http import remove_url_credentials
113
114 _ServerRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]
115 _ClientRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]
116 _ClientResponseHookT = typing.Optional[typing.Callable[[Span, dict], None]]
117
118
119 class ASGIGetter(Getter):
120 def get(
121 self, carrier: dict, key: str
122 ) -> typing.Optional[typing.List[str]]:
123 """Getter implementation to retrieve a HTTP header value from the ASGI
124 scope.
125
126 Args:
127 carrier: ASGI scope object
128 key: header name in scope
129 Returns:
130 A list with a single string with the header value if it exists,
131 else None.
132 """
133 headers = carrier.get("headers")
134 if not headers:
135 return None
136
137 # asgi header keys are in lower case
138 key = key.lower()
139 decoded = [
140 _value.decode("utf8")
141 for (_key, _value) in headers
142 if _key.decode("utf8") == key
143 ]
144 if not decoded:
145 return None
146 return decoded
147
148 def keys(self, carrier: dict) -> typing.List[str]:
149 return list(carrier.keys())
150
151
152 asgi_getter = ASGIGetter()
153
154
155 def collect_request_attributes(scope):
156 """Collects HTTP request attributes from the ASGI scope and returns a
157 dictionary to be used as span creation attributes."""
158 server_host, port, http_url = get_host_port_url_tuple(scope)
159 query_string = scope.get("query_string")
160 if query_string and http_url:
161 if isinstance(query_string, bytes):
162 query_string = query_string.decode("utf8")
163 http_url = http_url + ("?" + urllib.parse.unquote(query_string))
164
165 result = {
166 SpanAttributes.HTTP_SCHEME: scope.get("scheme"),
167 SpanAttributes.HTTP_HOST: server_host,
168 SpanAttributes.NET_HOST_PORT: port,
169 SpanAttributes.HTTP_FLAVOR: scope.get("http_version"),
170 SpanAttributes.HTTP_TARGET: scope.get("path"),
171 SpanAttributes.HTTP_URL: remove_url_credentials(http_url),
172 }
173 http_method = scope.get("method")
174 if http_method:
175 result[SpanAttributes.HTTP_METHOD] = http_method
176
177 http_host_value_list = asgi_getter.get(scope, "host")
178 if http_host_value_list:
179 result[SpanAttributes.HTTP_SERVER_NAME] = ",".join(
180 http_host_value_list
181 )
182 http_user_agent = asgi_getter.get(scope, "user-agent")
183 if http_user_agent:
184 result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]
185
186 if "client" in scope and scope["client"] is not None:
187 result[SpanAttributes.NET_PEER_IP] = scope.get("client")[0]
188 result[SpanAttributes.NET_PEER_PORT] = scope.get("client")[1]
189
190 # remove None values
191 result = {k: v for k, v in result.items() if v is not None}
192
193 return result
194
195
196 def get_host_port_url_tuple(scope):
197 """Returns (host, port, full_url) tuple."""
198 server = scope.get("server") or ["0.0.0.0", 80]
199 port = server[1]
200 server_host = server[0] + (":" + str(port) if str(port) != "80" else "")
201 full_path = scope.get("root_path", "") + scope.get("path", "")
202 http_url = scope.get("scheme", "http") + "://" + server_host + full_path
203 return server_host, port, http_url
204
205
206 def set_status_code(span, status_code):
207 """Adds HTTP response attributes to span using the status_code argument."""
208 if not span.is_recording():
209 return
210 try:
211 status_code = int(status_code)
212 except ValueError:
213 span.set_status(
214 Status(
215 StatusCode.ERROR,
216 "Non-integer HTTP status: " + repr(status_code),
217 )
218 )
219 else:
220 span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)
221 span.set_status(
222 Status(http_status_to_status_code(status_code, server_span=True))
223 )
224
225
226 def get_default_span_details(scope: dict) -> Tuple[str, dict]:
227 """Default implementation for get_default_span_details
228 Args:
229 scope: the asgi scope dictionary
230 Returns:
231 a tuple of the span name, and any attributes to attach to the span.
232 """
233 span_name = (
234 scope.get("path", "").strip()
235 or f"HTTP {scope.get('method', '').strip()}"
236 )
237
238 return span_name, {}
239
240
241 class OpenTelemetryMiddleware:
242 """The ASGI application middleware.
243
244 This class is an ASGI middleware that starts and annotates spans for any
245 requests it is invoked with.
246
247 Args:
248 app: The ASGI application callable to forward requests to.
249 default_span_details: Callback which should return a string and a tuple, representing the desired default span name and a
250 dictionary with any additional span attributes to set.
251 Optional: Defaults to get_default_span_details.
252 server_request_hook: Optional callback which is called with the server span and ASGI
253 scope object for every incoming request.
254 client_request_hook: Optional callback which is called with the internal span and an ASGI
255 scope which is sent as a dictionary for when the method recieve is called.
256 client_response_hook: Optional callback which is called with the internal span and an ASGI
257 event which is sent as a dictionary for when the method send is called.
258 tracer_provider: The optional tracer provider to use. If omitted
259 the current globally configured one is used.
260 """
261
262 def __init__(
263 self,
264 app,
265 excluded_urls=None,
266 default_span_details=None,
267 server_request_hook: _ServerRequestHookT = None,
268 client_request_hook: _ClientRequestHookT = None,
269 client_response_hook: _ClientResponseHookT = None,
270 tracer_provider=None,
271 ):
272 self.app = guarantee_single_callable(app)
273 self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)
274 self.excluded_urls = excluded_urls
275 self.default_span_details = (
276 default_span_details or get_default_span_details
277 )
278 self.server_request_hook = server_request_hook
279 self.client_request_hook = client_request_hook
280 self.client_response_hook = client_response_hook
281
282 async def __call__(self, scope, receive, send):
283 """The ASGI application
284
285 Args:
286 scope: A ASGI environment.
287 receive: An awaitable callable yielding dictionaries
288 send: An awaitable callable taking a single dictionary as argument.
289 """
290 if scope["type"] not in ("http", "websocket"):
291 return await self.app(scope, receive, send)
292
293 _, _, url = get_host_port_url_tuple(scope)
294 if self.excluded_urls and self.excluded_urls.url_disabled(url):
295 return await self.app(scope, receive, send)
296
297 token = context.attach(extract(scope, getter=asgi_getter))
298 span_name, additional_attributes = self.default_span_details(scope)
299
300 try:
301 with self.tracer.start_as_current_span(
302 span_name,
303 kind=trace.SpanKind.SERVER,
304 ) as span:
305 if span.is_recording():
306 attributes = collect_request_attributes(scope)
307 attributes.update(additional_attributes)
308 for key, value in attributes.items():
309 span.set_attribute(key, value)
310
311 if callable(self.server_request_hook):
312 self.server_request_hook(span, scope)
313
314 @wraps(receive)
315 async def wrapped_receive():
316 with self.tracer.start_as_current_span(
317 " ".join((span_name, scope["type"], "receive"))
318 ) as receive_span:
319 if callable(self.client_request_hook):
320 self.client_request_hook(receive_span, scope)
321 message = await receive()
322 if receive_span.is_recording():
323 if message["type"] == "websocket.receive":
324 set_status_code(receive_span, 200)
325 receive_span.set_attribute("type", message["type"])
326 return message
327
328 @wraps(send)
329 async def wrapped_send(message):
330 with self.tracer.start_as_current_span(
331 " ".join((span_name, scope["type"], "send"))
332 ) as send_span:
333 if callable(self.client_response_hook):
334 self.client_response_hook(send_span, message)
335 if send_span.is_recording():
336 if message["type"] == "http.response.start":
337 status_code = message["status"]
338 set_status_code(span, status_code)
339 set_status_code(send_span, status_code)
340 elif message["type"] == "websocket.send":
341 set_status_code(span, 200)
342 set_status_code(send_span, 200)
343 send_span.set_attribute("type", message["type"])
344 await send(message)
345
346 await self.app(scope, wrapped_receive, wrapped_send)
347 finally:
348 context.detach(token)
349
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
--- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
+++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py
@@ -160,7 +160,7 @@
if query_string and http_url:
if isinstance(query_string, bytes):
query_string = query_string.decode("utf8")
- http_url = http_url + ("?" + urllib.parse.unquote(query_string))
+ http_url += "?" + urllib.parse.unquote(query_string)
result = {
SpanAttributes.HTTP_SCHEME: scope.get("scheme"),
| {"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py\n@@ -160,7 +160,7 @@\n if query_string and http_url:\n if isinstance(query_string, bytes):\n query_string = query_string.decode(\"utf8\")\n- http_url = http_url + (\"?\" + urllib.parse.unquote(query_string))\n+ http_url += \"?\" + urllib.parse.unquote(query_string)\n \n result = {\n SpanAttributes.HTTP_SCHEME: scope.get(\"scheme\"),\n", "issue": "Increase the usage of augmented assignment statements\nThis issue corresponds with [this one](https://github.com/open-telemetry/opentelemetry-python/issues/2258).\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used\non any ASGI framework (such as Django-channels / Quart) to track requests\ntiming through OpenTelemetry.\n\nUsage (Quart)\n-------------\n\n.. code-block:: python\n\n from quart import Quart\n from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware\n\n app = Quart(__name__)\n app.asgi_app = OpenTelemetryMiddleware(app.asgi_app)\n\n @app.route(\"/\")\n async def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django 3.0)\n------------------\n\nModify the application's ``asgi.py`` file as shown below.\n\n.. code-block:: python\n\n import os\n from django.core.asgi import get_asgi_application\n from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'asgi_example.settings')\n\n application = get_asgi_application()\n application = OpenTelemetryMiddleware(application)\n\n\nUsage (Raw ASGI)\n----------------\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware\n\n app = ... # An ASGI application.\n app = OpenTelemetryMiddleware(app)\n\n\nConfiguration\n-------------\n\nRequest/Response hooks\n**********************\n\nUtilize request/reponse hooks to execute custom logic to be performed before/after performing a request. The server request hook takes in a server span and ASGI\nscope object for every incoming request. The client request hook is called with the internal span and an ASGI scope which is sent as a dictionary for when the method recieve is called.\nThe client response hook is called with the internal span and an ASGI event which is sent as a dictionary for when the method send is called.\n\n.. 
code-block:: python\n\n def server_request_hook(span: Span, scope: dict):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def client_request_hook(span: Span, scope: dict):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_client_request_hook\", \"some-value\")\n\n def client_response_hook(span: Span, message: dict):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n OpenTelemetryMiddleware().(application, server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook)\n\nAPI\n---\n\"\"\"\n\nimport typing\nimport urllib\nfrom functools import wraps\nfrom typing import Tuple\n\nfrom asgiref.compatibility import guarantee_single_callable\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.asgi.version import __version__ # noqa\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import Span\nfrom opentelemetry.trace.status import Status, StatusCode\nfrom opentelemetry.util.http import remove_url_credentials\n\n_ServerRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]\n_ClientRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]\n_ClientResponseHookT = typing.Optional[typing.Callable[[Span, dict], None]]\n\n\nclass ASGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the ASGI\n scope.\n\n Args:\n carrier: ASGI scope object\n key: header name in scope\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n headers = carrier.get(\"headers\")\n if not headers:\n return None\n\n # asgi header keys are in lower case\n key = key.lower()\n decoded = [\n _value.decode(\"utf8\")\n for (_key, _value) in headers\n if _key.decode(\"utf8\") == key\n ]\n if not decoded:\n return None\n return decoded\n\n def keys(self, carrier: dict) -> typing.List[str]:\n return list(carrier.keys())\n\n\nasgi_getter = ASGIGetter()\n\n\ndef collect_request_attributes(scope):\n \"\"\"Collects HTTP request attributes from the ASGI scope and returns a\n dictionary to be used as span creation attributes.\"\"\"\n server_host, port, http_url = get_host_port_url_tuple(scope)\n query_string = scope.get(\"query_string\")\n if query_string and http_url:\n if isinstance(query_string, bytes):\n query_string = query_string.decode(\"utf8\")\n http_url = http_url + (\"?\" + urllib.parse.unquote(query_string))\n\n result = {\n SpanAttributes.HTTP_SCHEME: scope.get(\"scheme\"),\n SpanAttributes.HTTP_HOST: server_host,\n SpanAttributes.NET_HOST_PORT: port,\n SpanAttributes.HTTP_FLAVOR: scope.get(\"http_version\"),\n SpanAttributes.HTTP_TARGET: scope.get(\"path\"),\n SpanAttributes.HTTP_URL: remove_url_credentials(http_url),\n }\n http_method = scope.get(\"method\")\n if http_method:\n result[SpanAttributes.HTTP_METHOD] = http_method\n\n http_host_value_list = asgi_getter.get(scope, \"host\")\n if http_host_value_list:\n result[SpanAttributes.HTTP_SERVER_NAME] = \",\".join(\n http_host_value_list\n )\n http_user_agent = asgi_getter.get(scope, \"user-agent\")\n if http_user_agent:\n 
result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]\n\n if \"client\" in scope and scope[\"client\"] is not None:\n result[SpanAttributes.NET_PEER_IP] = scope.get(\"client\")[0]\n result[SpanAttributes.NET_PEER_PORT] = scope.get(\"client\")[1]\n\n # remove None values\n result = {k: v for k, v in result.items() if v is not None}\n\n return result\n\n\ndef get_host_port_url_tuple(scope):\n \"\"\"Returns (host, port, full_url) tuple.\"\"\"\n server = scope.get(\"server\") or [\"0.0.0.0\", 80]\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if str(port) != \"80\" else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n http_url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n return server_host, port, http_url\n\n\ndef set_status_code(span, status_code):\n \"\"\"Adds HTTP response attributes to span using the status_code argument.\"\"\"\n if not span.is_recording():\n return\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(\n Status(http_status_to_status_code(status_code, server_span=True))\n )\n\n\ndef get_default_span_details(scope: dict) -> Tuple[str, dict]:\n \"\"\"Default implementation for get_default_span_details\n Args:\n scope: the asgi scope dictionary\n Returns:\n a tuple of the span name, and any attributes to attach to the span.\n \"\"\"\n span_name = (\n scope.get(\"path\", \"\").strip()\n or f\"HTTP {scope.get('method', '').strip()}\"\n )\n\n return span_name, {}\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The ASGI application middleware.\n\n This class is an ASGI middleware that starts and annotates spans for any\n requests it is invoked with.\n\n Args:\n app: The ASGI application callable to forward requests to.\n default_span_details: Callback which should return a string and a tuple, representing the desired default span name and a\n dictionary with any additional span attributes to set.\n Optional: Defaults to get_default_span_details.\n server_request_hook: Optional callback which is called with the server span and ASGI\n scope object for every incoming request.\n client_request_hook: Optional callback which is called with the internal span and an ASGI\n scope which is sent as a dictionary for when the method recieve is called.\n client_response_hook: Optional callback which is called with the internal span and an ASGI\n event which is sent as a dictionary for when the method send is called.\n tracer_provider: The optional tracer provider to use. 
If omitted\n the current globally configured one is used.\n \"\"\"\n\n def __init__(\n self,\n app,\n excluded_urls=None,\n default_span_details=None,\n server_request_hook: _ServerRequestHookT = None,\n client_request_hook: _ClientRequestHookT = None,\n client_response_hook: _ClientResponseHookT = None,\n tracer_provider=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.excluded_urls = excluded_urls\n self.default_span_details = (\n default_span_details or get_default_span_details\n )\n self.server_request_hook = server_request_hook\n self.client_request_hook = client_request_hook\n self.client_response_hook = client_response_hook\n\n async def __call__(self, scope, receive, send):\n \"\"\"The ASGI application\n\n Args:\n scope: A ASGI environment.\n receive: An awaitable callable yielding dictionaries\n send: An awaitable callable taking a single dictionary as argument.\n \"\"\"\n if scope[\"type\"] not in (\"http\", \"websocket\"):\n return await self.app(scope, receive, send)\n\n _, _, url = get_host_port_url_tuple(scope)\n if self.excluded_urls and self.excluded_urls.url_disabled(url):\n return await self.app(scope, receive, send)\n\n token = context.attach(extract(scope, getter=asgi_getter))\n span_name, additional_attributes = self.default_span_details(scope)\n\n try:\n with self.tracer.start_as_current_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n ) as span:\n if span.is_recording():\n attributes = collect_request_attributes(scope)\n attributes.update(additional_attributes)\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n if callable(self.server_request_hook):\n self.server_request_hook(span, scope)\n\n @wraps(receive)\n async def wrapped_receive():\n with self.tracer.start_as_current_span(\n \" \".join((span_name, scope[\"type\"], \"receive\"))\n ) as receive_span:\n if callable(self.client_request_hook):\n self.client_request_hook(receive_span, scope)\n message = await receive()\n if receive_span.is_recording():\n if message[\"type\"] == \"websocket.receive\":\n set_status_code(receive_span, 200)\n receive_span.set_attribute(\"type\", message[\"type\"])\n return message\n\n @wraps(send)\n async def wrapped_send(message):\n with self.tracer.start_as_current_span(\n \" \".join((span_name, scope[\"type\"], \"send\"))\n ) as send_span:\n if callable(self.client_response_hook):\n self.client_response_hook(send_span, message)\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n set_status_code(span, status_code)\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n set_status_code(span, 200)\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n\n await self.app(scope, wrapped_receive, wrapped_send)\n finally:\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe opentelemetry-instrumentation-asgi package provides an ASGI middleware that can be used\non any ASGI framework (such as Django-channels / Quart) to track requests\ntiming through OpenTelemetry.\n\nUsage (Quart)\n-------------\n\n.. code-block:: python\n\n from quart import Quart\n from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware\n\n app = Quart(__name__)\n app.asgi_app = OpenTelemetryMiddleware(app.asgi_app)\n\n @app.route(\"/\")\n async def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django 3.0)\n------------------\n\nModify the application's ``asgi.py`` file as shown below.\n\n.. code-block:: python\n\n import os\n from django.core.asgi import get_asgi_application\n from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'asgi_example.settings')\n\n application = get_asgi_application()\n application = OpenTelemetryMiddleware(application)\n\n\nUsage (Raw ASGI)\n----------------\n\n.. code-block:: python\n\n from opentelemetry.instrumentation.asgi import OpenTelemetryMiddleware\n\n app = ... # An ASGI application.\n app = OpenTelemetryMiddleware(app)\n\n\nConfiguration\n-------------\n\nRequest/Response hooks\n**********************\n\nUtilize request/reponse hooks to execute custom logic to be performed before/after performing a request. The server request hook takes in a server span and ASGI\nscope object for every incoming request. The client request hook is called with the internal span and an ASGI scope which is sent as a dictionary for when the method recieve is called.\nThe client response hook is called with the internal span and an ASGI event which is sent as a dictionary for when the method send is called.\n\n.. 
code-block:: python\n\n def server_request_hook(span: Span, scope: dict):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_request_hook\", \"some-value\")\n\n def client_request_hook(span: Span, scope: dict):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_client_request_hook\", \"some-value\")\n\n def client_response_hook(span: Span, message: dict):\n if span and span.is_recording():\n span.set_attribute(\"custom_user_attribute_from_response_hook\", \"some-value\")\n\n OpenTelemetryMiddleware().(application, server_request_hook=server_request_hook, client_request_hook=client_request_hook, client_response_hook=client_response_hook)\n\nAPI\n---\n\"\"\"\n\nimport typing\nimport urllib\nfrom functools import wraps\nfrom typing import Tuple\n\nfrom asgiref.compatibility import guarantee_single_callable\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.asgi.version import __version__ # noqa\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace import Span\nfrom opentelemetry.trace.status import Status, StatusCode\nfrom opentelemetry.util.http import remove_url_credentials\n\n_ServerRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]\n_ClientRequestHookT = typing.Optional[typing.Callable[[Span, dict], None]]\n_ClientResponseHookT = typing.Optional[typing.Callable[[Span, dict], None]]\n\n\nclass ASGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the ASGI\n scope.\n\n Args:\n carrier: ASGI scope object\n key: header name in scope\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n headers = carrier.get(\"headers\")\n if not headers:\n return None\n\n # asgi header keys are in lower case\n key = key.lower()\n decoded = [\n _value.decode(\"utf8\")\n for (_key, _value) in headers\n if _key.decode(\"utf8\") == key\n ]\n if not decoded:\n return None\n return decoded\n\n def keys(self, carrier: dict) -> typing.List[str]:\n return list(carrier.keys())\n\n\nasgi_getter = ASGIGetter()\n\n\ndef collect_request_attributes(scope):\n \"\"\"Collects HTTP request attributes from the ASGI scope and returns a\n dictionary to be used as span creation attributes.\"\"\"\n server_host, port, http_url = get_host_port_url_tuple(scope)\n query_string = scope.get(\"query_string\")\n if query_string and http_url:\n if isinstance(query_string, bytes):\n query_string = query_string.decode(\"utf8\")\n http_url += \"?\" + urllib.parse.unquote(query_string)\n\n result = {\n SpanAttributes.HTTP_SCHEME: scope.get(\"scheme\"),\n SpanAttributes.HTTP_HOST: server_host,\n SpanAttributes.NET_HOST_PORT: port,\n SpanAttributes.HTTP_FLAVOR: scope.get(\"http_version\"),\n SpanAttributes.HTTP_TARGET: scope.get(\"path\"),\n SpanAttributes.HTTP_URL: remove_url_credentials(http_url),\n }\n http_method = scope.get(\"method\")\n if http_method:\n result[SpanAttributes.HTTP_METHOD] = http_method\n\n http_host_value_list = asgi_getter.get(scope, \"host\")\n if http_host_value_list:\n result[SpanAttributes.HTTP_SERVER_NAME] = \",\".join(\n http_host_value_list\n )\n http_user_agent = asgi_getter.get(scope, \"user-agent\")\n if http_user_agent:\n 
result[SpanAttributes.HTTP_USER_AGENT] = http_user_agent[0]\n\n if \"client\" in scope and scope[\"client\"] is not None:\n result[SpanAttributes.NET_PEER_IP] = scope.get(\"client\")[0]\n result[SpanAttributes.NET_PEER_PORT] = scope.get(\"client\")[1]\n\n # remove None values\n result = {k: v for k, v in result.items() if v is not None}\n\n return result\n\n\ndef get_host_port_url_tuple(scope):\n \"\"\"Returns (host, port, full_url) tuple.\"\"\"\n server = scope.get(\"server\") or [\"0.0.0.0\", 80]\n port = server[1]\n server_host = server[0] + (\":\" + str(port) if str(port) != \"80\" else \"\")\n full_path = scope.get(\"root_path\", \"\") + scope.get(\"path\", \"\")\n http_url = scope.get(\"scheme\", \"http\") + \"://\" + server_host + full_path\n return server_host, port, http_url\n\n\ndef set_status_code(span, status_code):\n \"\"\"Adds HTTP response attributes to span using the status_code argument.\"\"\"\n if not span.is_recording():\n return\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(\n Status(http_status_to_status_code(status_code, server_span=True))\n )\n\n\ndef get_default_span_details(scope: dict) -> Tuple[str, dict]:\n \"\"\"Default implementation for get_default_span_details\n Args:\n scope: the asgi scope dictionary\n Returns:\n a tuple of the span name, and any attributes to attach to the span.\n \"\"\"\n span_name = (\n scope.get(\"path\", \"\").strip()\n or f\"HTTP {scope.get('method', '').strip()}\"\n )\n\n return span_name, {}\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The ASGI application middleware.\n\n This class is an ASGI middleware that starts and annotates spans for any\n requests it is invoked with.\n\n Args:\n app: The ASGI application callable to forward requests to.\n default_span_details: Callback which should return a string and a tuple, representing the desired default span name and a\n dictionary with any additional span attributes to set.\n Optional: Defaults to get_default_span_details.\n server_request_hook: Optional callback which is called with the server span and ASGI\n scope object for every incoming request.\n client_request_hook: Optional callback which is called with the internal span and an ASGI\n scope which is sent as a dictionary for when the method recieve is called.\n client_response_hook: Optional callback which is called with the internal span and an ASGI\n event which is sent as a dictionary for when the method send is called.\n tracer_provider: The optional tracer provider to use. 
If omitted\n the current globally configured one is used.\n \"\"\"\n\n def __init__(\n self,\n app,\n excluded_urls=None,\n default_span_details=None,\n server_request_hook: _ServerRequestHookT = None,\n client_request_hook: _ClientRequestHookT = None,\n client_response_hook: _ClientResponseHookT = None,\n tracer_provider=None,\n ):\n self.app = guarantee_single_callable(app)\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.excluded_urls = excluded_urls\n self.default_span_details = (\n default_span_details or get_default_span_details\n )\n self.server_request_hook = server_request_hook\n self.client_request_hook = client_request_hook\n self.client_response_hook = client_response_hook\n\n async def __call__(self, scope, receive, send):\n \"\"\"The ASGI application\n\n Args:\n scope: A ASGI environment.\n receive: An awaitable callable yielding dictionaries\n send: An awaitable callable taking a single dictionary as argument.\n \"\"\"\n if scope[\"type\"] not in (\"http\", \"websocket\"):\n return await self.app(scope, receive, send)\n\n _, _, url = get_host_port_url_tuple(scope)\n if self.excluded_urls and self.excluded_urls.url_disabled(url):\n return await self.app(scope, receive, send)\n\n token = context.attach(extract(scope, getter=asgi_getter))\n span_name, additional_attributes = self.default_span_details(scope)\n\n try:\n with self.tracer.start_as_current_span(\n span_name,\n kind=trace.SpanKind.SERVER,\n ) as span:\n if span.is_recording():\n attributes = collect_request_attributes(scope)\n attributes.update(additional_attributes)\n for key, value in attributes.items():\n span.set_attribute(key, value)\n\n if callable(self.server_request_hook):\n self.server_request_hook(span, scope)\n\n @wraps(receive)\n async def wrapped_receive():\n with self.tracer.start_as_current_span(\n \" \".join((span_name, scope[\"type\"], \"receive\"))\n ) as receive_span:\n if callable(self.client_request_hook):\n self.client_request_hook(receive_span, scope)\n message = await receive()\n if receive_span.is_recording():\n if message[\"type\"] == \"websocket.receive\":\n set_status_code(receive_span, 200)\n receive_span.set_attribute(\"type\", message[\"type\"])\n return message\n\n @wraps(send)\n async def wrapped_send(message):\n with self.tracer.start_as_current_span(\n \" \".join((span_name, scope[\"type\"], \"send\"))\n ) as send_span:\n if callable(self.client_response_hook):\n self.client_response_hook(send_span, message)\n if send_span.is_recording():\n if message[\"type\"] == \"http.response.start\":\n status_code = message[\"status\"]\n set_status_code(span, status_code)\n set_status_code(send_span, status_code)\n elif message[\"type\"] == \"websocket.send\":\n set_status_code(span, 200)\n set_status_code(send_span, 200)\n send_span.set_attribute(\"type\", message[\"type\"])\n await send(message)\n\n await self.app(scope, wrapped_receive, wrapped_send)\n finally:\n context.detach(token)\n", "path": "instrumentation/opentelemetry-instrumentation-asgi/src/opentelemetry/instrumentation/asgi/__init__.py"}]} | 3,995 | 198 |
gh_patches_debug_11306 | rasdani/github-patches | git_diff | pallets__werkzeug-681 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
management script utilities is broken in 0.10
In 46ff5e9, the `run_simple` function in `werkzeug/serving.py` adds a new parameter `reloader_type`, which breaks management script utilities.
```
Traceback (most recent call last):
File "manage.py", line 27, in <module>
manager.run()
File "/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/flaskext/actions/__init__.py", line 58, in run
script.run(self._actions, '')
File "/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/werkzeug/script.py", line 177, in run
return func(**arguments)
File "/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/werkzeug/script.py", line 314, in action
static_files=static_files, ssl_context=ssl_context)
File "/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/werkzeug/serving.py", line 622, in run_simple
reloader_type)
File "/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/werkzeug/_reloader.py", line 262, in run_with_reloader
reloader = reloader_loops[reloader_type](extra_files, interval)
KeyError: True
```
I found that `werkzeug/script.py` does not use named parameters when it calls the `run_simple` function.
I have made a pull request to fix it.
--- END ISSUE ---
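Before looking at the files, a small self-contained sketch may help explain the failure mode; these are not the real werkzeug signatures, just trimmed-down stand-ins showing why a purely positional call breaks when a new parameter is inserted into the middle of a signature, and why keyword arguments do not.
```python
# Hypothetical, simplified signatures for illustration only.
def run_simple_v09(hostname, port, app, use_reloader=False, threaded=False):
    return {"use_reloader": use_reloader, "threaded": threaded}


def run_simple_v010(hostname, port, app, use_reloader=False,
                    reloader_type="auto", threaded=False):
    # The new parameter now occupies the positional slot that
    # `threaded` used to occupy in the older signature above.
    return {"use_reloader": use_reloader,
            "reloader_type": reloader_type,
            "threaded": threaded}


app = object()

# A caller written positionally against the old signature: the True that
# was meant for `threaded` silently lands in `reloader_type`, the same
# kind of misrouting the traceback above reports (KeyError: True).
broken = run_simple_v010("localhost", 5000, app, False, True)
assert broken["reloader_type"] is True
assert broken["threaded"] is False

# Keyword arguments keep the call site stable across signature changes.
fixed = run_simple_v010("localhost", 5000, app, use_reloader=False, threaded=True)
assert fixed["threaded"] is True
```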
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `werkzeug/script.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 r'''
3 werkzeug.script
4 ~~~~~~~~~~~~~~~
5
6 .. admonition:: Deprecated Functionality
7
8 ``werkzeug.script`` is deprecated without replacement functionality.
9 Python's command line support improved greatly with :mod:`argparse`
10 and a bunch of alternative modules.
11
12 Most of the time you have recurring tasks while writing an application
13 such as starting up an interactive python interpreter with some prefilled
14 imports, starting the development server, initializing the database or
15 something similar.
16
17 For that purpose werkzeug provides the `werkzeug.script` module which
18 helps you writing such scripts.
19
20
21 Basic Usage
22 -----------
23
24 The following snippet is roughly the same in every werkzeug script::
25
26 #!/usr/bin/env python
27 # -*- coding: utf-8 -*-
28 from werkzeug import script
29
30 # actions go here
31
32 if __name__ == '__main__':
33 script.run()
34
35 Starting this script now does nothing because no actions are defined.
36 An action is a function in the same module starting with ``"action_"``
37 which takes a number of arguments where every argument has a default. The
38 type of the default value specifies the type of the argument.
39
40 Arguments can then be passed by position or using ``--name=value`` from
41 the shell.
42
43 Because a runserver and shell command is pretty common there are two
44 factory functions that create such commands::
45
46 def make_app():
47 from yourapplication import YourApplication
48 return YourApplication(...)
49
50 action_runserver = script.make_runserver(make_app, use_reloader=True)
51 action_shell = script.make_shell(lambda: {'app': make_app()})
52
53
54 Using The Scripts
55 -----------------
56
57 The script from above can be used like this from the shell now:
58
59 .. sourcecode:: text
60
61 $ ./manage.py --help
62 $ ./manage.py runserver localhost 8080 --debugger --no-reloader
63 $ ./manage.py runserver -p 4000
64 $ ./manage.py shell
65
66 As you can see it's possible to pass parameters as positional arguments
67 or as named parameters, pretty much like Python function calls.
68
69
70 :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
71 :license: BSD, see LICENSE for more details.
72 '''
73 from __future__ import print_function
74
75 import sys
76 import inspect
77 import getopt
78 from os.path import basename
79 from werkzeug._compat import iteritems
80
81
82 argument_types = {
83 bool: 'boolean',
84 str: 'string',
85 int: 'integer',
86 float: 'float'
87 }
88
89
90 converters = {
91 'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
92 'string': str,
93 'integer': int,
94 'float': float
95 }
96
97
98 def run(namespace=None, action_prefix='action_', args=None):
99 """Run the script. Participating actions are looked up in the caller's
100 namespace if no namespace is given, otherwise in the dict provided.
101 Only items that start with action_prefix are processed as actions. If
102 you want to use all items in the namespace provided as actions set
103 action_prefix to an empty string.
104
105 :param namespace: An optional dict where the functions are looked up in.
106 By default the local namespace of the caller is used.
107 :param action_prefix: The prefix for the functions. Everything else
108 is ignored.
109 :param args: the arguments for the function. If not specified
110 :data:`sys.argv` without the first argument is used.
111 """
112 if namespace is None:
113 namespace = sys._getframe(1).f_locals
114 actions = find_actions(namespace, action_prefix)
115
116 if args is None:
117 args = sys.argv[1:]
118 if not args or args[0] in ('-h', '--help'):
119 return print_usage(actions)
120 elif args[0] not in actions:
121 fail('Unknown action \'%s\'' % args[0])
122
123 arguments = {}
124 types = {}
125 key_to_arg = {}
126 long_options = []
127 formatstring = ''
128 func, doc, arg_def = actions[args.pop(0)]
129 for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
130 real_arg = arg.replace('-', '_')
131 if shortcut:
132 formatstring += shortcut
133 if not isinstance(default, bool):
134 formatstring += ':'
135 key_to_arg['-' + shortcut] = real_arg
136 long_options.append(isinstance(default, bool) and arg or arg + '=')
137 key_to_arg['--' + arg] = real_arg
138 key_to_arg[idx] = real_arg
139 types[real_arg] = option_type
140 arguments[real_arg] = default
141
142 try:
143 optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
144 except getopt.GetoptError as e:
145 fail(str(e))
146
147 specified_arguments = set()
148 for key, value in enumerate(posargs):
149 try:
150 arg = key_to_arg[key]
151 except IndexError:
152 fail('Too many parameters')
153 specified_arguments.add(arg)
154 try:
155 arguments[arg] = converters[types[arg]](value)
156 except ValueError:
157 fail('Invalid value for argument %s (%s): %s' % (key, arg, value))
158
159 for key, value in optlist:
160 arg = key_to_arg[key]
161 if arg in specified_arguments:
162 fail('Argument \'%s\' is specified twice' % arg)
163 if types[arg] == 'boolean':
164 if arg.startswith('no_'):
165 value = 'no'
166 else:
167 value = 'yes'
168 try:
169 arguments[arg] = converters[types[arg]](value)
170 except ValueError:
171 fail('Invalid value for \'%s\': %s' % (key, value))
172
173 newargs = {}
174 for k, v in iteritems(arguments):
175 newargs[k.startswith('no_') and k[3:] or k] = v
176 arguments = newargs
177 return func(**arguments)
178
179
180 def fail(message, code=-1):
181 """Fail with an error."""
182 print('Error: %s' % message, file=sys.stderr)
183 sys.exit(code)
184
185
186 def find_actions(namespace, action_prefix):
187 """Find all the actions in the namespace."""
188 actions = {}
189 for key, value in iteritems(namespace):
190 if key.startswith(action_prefix):
191 actions[key[len(action_prefix):]] = analyse_action(value)
192 return actions
193
194
195 def print_usage(actions):
196 """Print the usage information. (Help screen)"""
197 actions = sorted(iteritems(actions))
198 print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
199 print(' %s --help' % basename(sys.argv[0]))
200 print()
201 print('actions:')
202 for name, (func, doc, arguments) in actions:
203 print(' %s:' % name)
204 for line in doc.splitlines():
205 print(' %s' % line)
206 if arguments:
207 print()
208 for arg, shortcut, default, argtype in arguments:
209 if isinstance(default, bool):
210 print(' %s' % (
211 (shortcut and '-%s, ' % shortcut or '') + '--' + arg
212 ))
213 else:
214 print(' %-30s%-10s%s' % (
215 (shortcut and '-%s, ' % shortcut or '') + '--' + arg,
216 argtype, default
217 ))
218 print()
219
220
221 def analyse_action(func):
222 """Analyse a function."""
223 description = inspect.getdoc(func) or 'undocumented action'
224 arguments = []
225 args, varargs, kwargs, defaults = inspect.getargspec(func)
226 if varargs or kwargs:
227 raise TypeError('variable length arguments for action not allowed.')
228 if len(args) != len(defaults or ()):
229 raise TypeError('not all arguments have proper definitions')
230
231 for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
232 if arg.startswith('_'):
233 raise TypeError('arguments may not start with an underscore')
234 if not isinstance(definition, tuple):
235 shortcut = None
236 default = definition
237 else:
238 shortcut, default = definition
239 argument_type = argument_types[type(default)]
240 if isinstance(default, bool) and default is True:
241 arg = 'no-' + arg
242 arguments.append((arg.replace('_', '-'), shortcut,
243 default, argument_type))
244 return func, description, arguments
245
246
247 def make_shell(init_func=None, banner=None, use_ipython=True):
248 """Returns an action callback that spawns a new interactive
249 python shell.
250
251 :param init_func: an optional initialization function that is
252 called before the shell is started. The return
253 value of this function is the initial namespace.
254 :param banner: the banner that is displayed before the shell. If
255 not specified a generic banner is used instead.
256 :param use_ipython: if set to `True` ipython is used if available.
257 """
258 if banner is None:
259 banner = 'Interactive Werkzeug Shell'
260 if init_func is None:
261 init_func = dict
262 def action(ipython=use_ipython):
263 """Start a new interactive python session."""
264 namespace = init_func()
265 if ipython:
266 try:
267 try:
268 from IPython.frontend.terminal.embed import InteractiveShellEmbed
269 sh = InteractiveShellEmbed(banner1=banner)
270 except ImportError:
271 from IPython.Shell import IPShellEmbed
272 sh = IPShellEmbed(banner=banner)
273 except ImportError:
274 pass
275 else:
276 sh(global_ns={}, local_ns=namespace)
277 return
278 from code import interact
279 interact(banner, local=namespace)
280 return action
281
282
283 def make_runserver(app_factory, hostname='localhost', port=5000,
284 use_reloader=False, use_debugger=False, use_evalex=True,
285 threaded=False, processes=1, static_files=None,
286 extra_files=None, ssl_context=None):
287 """Returns an action callback that spawns a new development server.
288
289 .. versionadded:: 0.5
290 `static_files` and `extra_files` was added.
291
292 ..versionadded:: 0.6.1
293 `ssl_context` was added.
294
295 :param app_factory: a function that returns a new WSGI application.
296 :param hostname: the default hostname the server should listen on.
297 :param port: the default port of the server.
298 :param use_reloader: the default setting for the reloader.
299 :param use_evalex: the default setting for the evalex flag of the debugger.
300 :param threaded: the default threading setting.
301 :param processes: the default number of processes to start.
302 :param static_files: optional dict of static files.
303 :param extra_files: optional list of extra files to track for reloading.
304 :param ssl_context: optional SSL context for running server in HTTPS mode.
305 """
306 def action(hostname=('h', hostname), port=('p', port),
307 reloader=use_reloader, debugger=use_debugger,
308 evalex=use_evalex, threaded=threaded, processes=processes):
309 """Start a new development server."""
310 from werkzeug.serving import run_simple
311 app = app_factory()
312 run_simple(hostname, port, app, reloader, debugger, evalex,
313 extra_files, 1, threaded, processes,
314 static_files=static_files, ssl_context=ssl_context)
315 return action
316
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/werkzeug/script.py b/werkzeug/script.py
--- a/werkzeug/script.py
+++ b/werkzeug/script.py
@@ -309,7 +309,9 @@
"""Start a new development server."""
from werkzeug.serving import run_simple
app = app_factory()
- run_simple(hostname, port, app, reloader, debugger, evalex,
- extra_files, 1, threaded, processes,
+ run_simple(hostname, port, app,
+ use_reloader=reloader, use_debugger=debugger,
+ use_evalex=evalex, extra_files=extra_files,
+ reloader_interval=1, threaded=threaded, processes=processes,
static_files=static_files, ssl_context=ssl_context)
return action
| {"golden_diff": "diff --git a/werkzeug/script.py b/werkzeug/script.py\n--- a/werkzeug/script.py\n+++ b/werkzeug/script.py\n@@ -309,7 +309,9 @@\n \"\"\"Start a new development server.\"\"\"\n from werkzeug.serving import run_simple\n app = app_factory()\n- run_simple(hostname, port, app, reloader, debugger, evalex,\n- extra_files, 1, threaded, processes,\n+ run_simple(hostname, port, app,\n+ use_reloader=reloader, use_debugger=debugger,\n+ use_evalex=evalex, extra_files=extra_files,\n+ reloader_interval=1, threaded=threaded, processes=processes,\n static_files=static_files, ssl_context=ssl_context)\n return action\n", "issue": "management script utilities is broken in 0.10\nIn 46ff5e9, the `run_simple` function in `werkzeug/serving.py` adds a new parameter `reloader_type`, which breaks management script utilities. \n\n```\nTraceback (most recent call last):\n File \"manage.py\", line 27, in <module>\n manager.run()\n File \"/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/flaskext/actions/__init__.py\", line 58, in run\n script.run(self._actions, '')\n File \"/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/werkzeug/script.py\", line 177, in run\n return func(**arguments)\n File \"/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/werkzeug/script.py\", line 314, in action\n static_files=static_files, ssl_context=ssl_context)\n File \"/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/werkzeug/serving.py\", line 622, in run_simple\n reloader_type)\n File \"/Users/leemars/.virtualenvs/meiping/lib/python2.7/site-packages/werkzeug/_reloader.py\", line 262, in run_with_reloader\n reloader = reloader_loops[reloader_type](extra_files, interval)\nKeyError: True\n```\n\nI found that in `werkzeug/script.py`, when calling the `run_simple` function, it didn't use named parameters.\n\nI have made a pull request to fix it.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nr'''\n werkzeug.script\n ~~~~~~~~~~~~~~~\n\n .. admonition:: Deprecated Functionality\n\n ``werkzeug.script`` is deprecated without replacement functionality.\n Python's command line support improved greatly with :mod:`argparse`\n and a bunch of alternative modules.\n\n Most of the time you have recurring tasks while writing an application\n such as starting up an interactive python interpreter with some prefilled\n imports, starting the development server, initializing the database or\n something similar.\n\n For that purpose werkzeug provides the `werkzeug.script` module which\n helps you writing such scripts.\n\n\n Basic Usage\n -----------\n\n The following snippet is roughly the same in every werkzeug script::\n\n #!/usr/bin/env python\n # -*- coding: utf-8 -*-\n from werkzeug import script\n\n # actions go here\n\n if __name__ == '__main__':\n script.run()\n\n Starting this script now does nothing because no actions are defined.\n An action is a function in the same module starting with ``\"action_\"``\n which takes a number of arguments where every argument has a default. 
The\n type of the default value specifies the type of the argument.\n\n Arguments can then be passed by position or using ``--name=value`` from\n the shell.\n\n Because a runserver and shell command is pretty common there are two\n factory functions that create such commands::\n\n def make_app():\n from yourapplication import YourApplication\n return YourApplication(...)\n\n action_runserver = script.make_runserver(make_app, use_reloader=True)\n action_shell = script.make_shell(lambda: {'app': make_app()})\n\n\n Using The Scripts\n -----------------\n\n The script from above can be used like this from the shell now:\n\n .. sourcecode:: text\n\n $ ./manage.py --help\n $ ./manage.py runserver localhost 8080 --debugger --no-reloader\n $ ./manage.py runserver -p 4000\n $ ./manage.py shell\n\n As you can see it's possible to pass parameters as positional arguments\n or as named parameters, pretty much like Python function calls.\n\n\n :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.\n :license: BSD, see LICENSE for more details.\n'''\nfrom __future__ import print_function\n\nimport sys\nimport inspect\nimport getopt\nfrom os.path import basename\nfrom werkzeug._compat import iteritems\n\n\nargument_types = {\n bool: 'boolean',\n str: 'string',\n int: 'integer',\n float: 'float'\n}\n\n\nconverters = {\n 'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),\n 'string': str,\n 'integer': int,\n 'float': float\n}\n\n\ndef run(namespace=None, action_prefix='action_', args=None):\n \"\"\"Run the script. Participating actions are looked up in the caller's\n namespace if no namespace is given, otherwise in the dict provided.\n Only items that start with action_prefix are processed as actions. If\n you want to use all items in the namespace provided as actions set\n action_prefix to an empty string.\n\n :param namespace: An optional dict where the functions are looked up in.\n By default the local namespace of the caller is used.\n :param action_prefix: The prefix for the functions. Everything else\n is ignored.\n :param args: the arguments for the function. 
If not specified\n :data:`sys.argv` without the first argument is used.\n \"\"\"\n if namespace is None:\n namespace = sys._getframe(1).f_locals\n actions = find_actions(namespace, action_prefix)\n\n if args is None:\n args = sys.argv[1:]\n if not args or args[0] in ('-h', '--help'):\n return print_usage(actions)\n elif args[0] not in actions:\n fail('Unknown action \\'%s\\'' % args[0])\n\n arguments = {}\n types = {}\n key_to_arg = {}\n long_options = []\n formatstring = ''\n func, doc, arg_def = actions[args.pop(0)]\n for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):\n real_arg = arg.replace('-', '_')\n if shortcut:\n formatstring += shortcut\n if not isinstance(default, bool):\n formatstring += ':'\n key_to_arg['-' + shortcut] = real_arg\n long_options.append(isinstance(default, bool) and arg or arg + '=')\n key_to_arg['--' + arg] = real_arg\n key_to_arg[idx] = real_arg\n types[real_arg] = option_type\n arguments[real_arg] = default\n\n try:\n optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)\n except getopt.GetoptError as e:\n fail(str(e))\n\n specified_arguments = set()\n for key, value in enumerate(posargs):\n try:\n arg = key_to_arg[key]\n except IndexError:\n fail('Too many parameters')\n specified_arguments.add(arg)\n try:\n arguments[arg] = converters[types[arg]](value)\n except ValueError:\n fail('Invalid value for argument %s (%s): %s' % (key, arg, value))\n\n for key, value in optlist:\n arg = key_to_arg[key]\n if arg in specified_arguments:\n fail('Argument \\'%s\\' is specified twice' % arg)\n if types[arg] == 'boolean':\n if arg.startswith('no_'):\n value = 'no'\n else:\n value = 'yes'\n try:\n arguments[arg] = converters[types[arg]](value)\n except ValueError:\n fail('Invalid value for \\'%s\\': %s' % (key, value))\n\n newargs = {}\n for k, v in iteritems(arguments):\n newargs[k.startswith('no_') and k[3:] or k] = v\n arguments = newargs\n return func(**arguments)\n\n\ndef fail(message, code=-1):\n \"\"\"Fail with an error.\"\"\"\n print('Error: %s' % message, file=sys.stderr)\n sys.exit(code)\n\n\ndef find_actions(namespace, action_prefix):\n \"\"\"Find all the actions in the namespace.\"\"\"\n actions = {}\n for key, value in iteritems(namespace):\n if key.startswith(action_prefix):\n actions[key[len(action_prefix):]] = analyse_action(value)\n return actions\n\n\ndef print_usage(actions):\n \"\"\"Print the usage information. 
(Help screen)\"\"\"\n actions = sorted(iteritems(actions))\n print('usage: %s <action> [<options>]' % basename(sys.argv[0]))\n print(' %s --help' % basename(sys.argv[0]))\n print()\n print('actions:')\n for name, (func, doc, arguments) in actions:\n print(' %s:' % name)\n for line in doc.splitlines():\n print(' %s' % line)\n if arguments:\n print()\n for arg, shortcut, default, argtype in arguments:\n if isinstance(default, bool):\n print(' %s' % (\n (shortcut and '-%s, ' % shortcut or '') + '--' + arg\n ))\n else:\n print(' %-30s%-10s%s' % (\n (shortcut and '-%s, ' % shortcut or '') + '--' + arg,\n argtype, default\n ))\n print()\n\n\ndef analyse_action(func):\n \"\"\"Analyse a function.\"\"\"\n description = inspect.getdoc(func) or 'undocumented action'\n arguments = []\n args, varargs, kwargs, defaults = inspect.getargspec(func)\n if varargs or kwargs:\n raise TypeError('variable length arguments for action not allowed.')\n if len(args) != len(defaults or ()):\n raise TypeError('not all arguments have proper definitions')\n\n for idx, (arg, definition) in enumerate(zip(args, defaults or ())):\n if arg.startswith('_'):\n raise TypeError('arguments may not start with an underscore')\n if not isinstance(definition, tuple):\n shortcut = None\n default = definition\n else:\n shortcut, default = definition\n argument_type = argument_types[type(default)]\n if isinstance(default, bool) and default is True:\n arg = 'no-' + arg\n arguments.append((arg.replace('_', '-'), shortcut,\n default, argument_type))\n return func, description, arguments\n\n\ndef make_shell(init_func=None, banner=None, use_ipython=True):\n \"\"\"Returns an action callback that spawns a new interactive\n python shell.\n\n :param init_func: an optional initialization function that is\n called before the shell is started. The return\n value of this function is the initial namespace.\n :param banner: the banner that is displayed before the shell. If\n not specified a generic banner is used instead.\n :param use_ipython: if set to `True` ipython is used if available.\n \"\"\"\n if banner is None:\n banner = 'Interactive Werkzeug Shell'\n if init_func is None:\n init_func = dict\n def action(ipython=use_ipython):\n \"\"\"Start a new interactive python session.\"\"\"\n namespace = init_func()\n if ipython:\n try:\n try:\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n sh = InteractiveShellEmbed(banner1=banner)\n except ImportError:\n from IPython.Shell import IPShellEmbed\n sh = IPShellEmbed(banner=banner)\n except ImportError:\n pass\n else:\n sh(global_ns={}, local_ns=namespace)\n return\n from code import interact\n interact(banner, local=namespace)\n return action\n\n\ndef make_runserver(app_factory, hostname='localhost', port=5000,\n use_reloader=False, use_debugger=False, use_evalex=True,\n threaded=False, processes=1, static_files=None,\n extra_files=None, ssl_context=None):\n \"\"\"Returns an action callback that spawns a new development server.\n\n .. 
versionadded:: 0.5\n `static_files` and `extra_files` was added.\n\n ..versionadded:: 0.6.1\n `ssl_context` was added.\n\n :param app_factory: a function that returns a new WSGI application.\n :param hostname: the default hostname the server should listen on.\n :param port: the default port of the server.\n :param use_reloader: the default setting for the reloader.\n :param use_evalex: the default setting for the evalex flag of the debugger.\n :param threaded: the default threading setting.\n :param processes: the default number of processes to start.\n :param static_files: optional dict of static files.\n :param extra_files: optional list of extra files to track for reloading.\n :param ssl_context: optional SSL context for running server in HTTPS mode.\n \"\"\"\n def action(hostname=('h', hostname), port=('p', port),\n reloader=use_reloader, debugger=use_debugger,\n evalex=use_evalex, threaded=threaded, processes=processes):\n \"\"\"Start a new development server.\"\"\"\n from werkzeug.serving import run_simple\n app = app_factory()\n run_simple(hostname, port, app, reloader, debugger, evalex,\n extra_files, 1, threaded, processes,\n static_files=static_files, ssl_context=ssl_context)\n return action\n", "path": "werkzeug/script.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nr'''\n werkzeug.script\n ~~~~~~~~~~~~~~~\n\n .. admonition:: Deprecated Functionality\n\n ``werkzeug.script`` is deprecated without replacement functionality.\n Python's command line support improved greatly with :mod:`argparse`\n and a bunch of alternative modules.\n\n Most of the time you have recurring tasks while writing an application\n such as starting up an interactive python interpreter with some prefilled\n imports, starting the development server, initializing the database or\n something similar.\n\n For that purpose werkzeug provides the `werkzeug.script` module which\n helps you writing such scripts.\n\n\n Basic Usage\n -----------\n\n The following snippet is roughly the same in every werkzeug script::\n\n #!/usr/bin/env python\n # -*- coding: utf-8 -*-\n from werkzeug import script\n\n # actions go here\n\n if __name__ == '__main__':\n script.run()\n\n Starting this script now does nothing because no actions are defined.\n An action is a function in the same module starting with ``\"action_\"``\n which takes a number of arguments where every argument has a default. The\n type of the default value specifies the type of the argument.\n\n Arguments can then be passed by position or using ``--name=value`` from\n the shell.\n\n Because a runserver and shell command is pretty common there are two\n factory functions that create such commands::\n\n def make_app():\n from yourapplication import YourApplication\n return YourApplication(...)\n\n action_runserver = script.make_runserver(make_app, use_reloader=True)\n action_shell = script.make_shell(lambda: {'app': make_app()})\n\n\n Using The Scripts\n -----------------\n\n The script from above can be used like this from the shell now:\n\n .. 
sourcecode:: text\n\n $ ./manage.py --help\n $ ./manage.py runserver localhost 8080 --debugger --no-reloader\n $ ./manage.py runserver -p 4000\n $ ./manage.py shell\n\n As you can see it's possible to pass parameters as positional arguments\n or as named parameters, pretty much like Python function calls.\n\n\n :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.\n :license: BSD, see LICENSE for more details.\n'''\nfrom __future__ import print_function\n\nimport sys\nimport inspect\nimport getopt\nfrom os.path import basename\nfrom werkzeug._compat import iteritems\n\n\nargument_types = {\n bool: 'boolean',\n str: 'string',\n int: 'integer',\n float: 'float'\n}\n\n\nconverters = {\n 'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),\n 'string': str,\n 'integer': int,\n 'float': float\n}\n\n\ndef run(namespace=None, action_prefix='action_', args=None):\n \"\"\"Run the script. Participating actions are looked up in the caller's\n namespace if no namespace is given, otherwise in the dict provided.\n Only items that start with action_prefix are processed as actions. If\n you want to use all items in the namespace provided as actions set\n action_prefix to an empty string.\n\n :param namespace: An optional dict where the functions are looked up in.\n By default the local namespace of the caller is used.\n :param action_prefix: The prefix for the functions. Everything else\n is ignored.\n :param args: the arguments for the function. If not specified\n :data:`sys.argv` without the first argument is used.\n \"\"\"\n if namespace is None:\n namespace = sys._getframe(1).f_locals\n actions = find_actions(namespace, action_prefix)\n\n if args is None:\n args = sys.argv[1:]\n if not args or args[0] in ('-h', '--help'):\n return print_usage(actions)\n elif args[0] not in actions:\n fail('Unknown action \\'%s\\'' % args[0])\n\n arguments = {}\n types = {}\n key_to_arg = {}\n long_options = []\n formatstring = ''\n func, doc, arg_def = actions[args.pop(0)]\n for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):\n real_arg = arg.replace('-', '_')\n if shortcut:\n formatstring += shortcut\n if not isinstance(default, bool):\n formatstring += ':'\n key_to_arg['-' + shortcut] = real_arg\n long_options.append(isinstance(default, bool) and arg or arg + '=')\n key_to_arg['--' + arg] = real_arg\n key_to_arg[idx] = real_arg\n types[real_arg] = option_type\n arguments[real_arg] = default\n\n try:\n optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)\n except getopt.GetoptError as e:\n fail(str(e))\n\n specified_arguments = set()\n for key, value in enumerate(posargs):\n try:\n arg = key_to_arg[key]\n except IndexError:\n fail('Too many parameters')\n specified_arguments.add(arg)\n try:\n arguments[arg] = converters[types[arg]](value)\n except ValueError:\n fail('Invalid value for argument %s (%s): %s' % (key, arg, value))\n\n for key, value in optlist:\n arg = key_to_arg[key]\n if arg in specified_arguments:\n fail('Argument \\'%s\\' is specified twice' % arg)\n if types[arg] == 'boolean':\n if arg.startswith('no_'):\n value = 'no'\n else:\n value = 'yes'\n try:\n arguments[arg] = converters[types[arg]](value)\n except ValueError:\n fail('Invalid value for \\'%s\\': %s' % (key, value))\n\n newargs = {}\n for k, v in iteritems(arguments):\n newargs[k.startswith('no_') and k[3:] or k] = v\n arguments = newargs\n return func(**arguments)\n\n\ndef fail(message, code=-1):\n \"\"\"Fail with an error.\"\"\"\n print('Error: %s' % message, file=sys.stderr)\n 
sys.exit(code)\n\n\ndef find_actions(namespace, action_prefix):\n \"\"\"Find all the actions in the namespace.\"\"\"\n actions = {}\n for key, value in iteritems(namespace):\n if key.startswith(action_prefix):\n actions[key[len(action_prefix):]] = analyse_action(value)\n return actions\n\n\ndef print_usage(actions):\n \"\"\"Print the usage information. (Help screen)\"\"\"\n actions = sorted(iteritems(actions))\n print('usage: %s <action> [<options>]' % basename(sys.argv[0]))\n print(' %s --help' % basename(sys.argv[0]))\n print()\n print('actions:')\n for name, (func, doc, arguments) in actions:\n print(' %s:' % name)\n for line in doc.splitlines():\n print(' %s' % line)\n if arguments:\n print()\n for arg, shortcut, default, argtype in arguments:\n if isinstance(default, bool):\n print(' %s' % (\n (shortcut and '-%s, ' % shortcut or '') + '--' + arg\n ))\n else:\n print(' %-30s%-10s%s' % (\n (shortcut and '-%s, ' % shortcut or '') + '--' + arg,\n argtype, default\n ))\n print()\n\n\ndef analyse_action(func):\n \"\"\"Analyse a function.\"\"\"\n description = inspect.getdoc(func) or 'undocumented action'\n arguments = []\n args, varargs, kwargs, defaults = inspect.getargspec(func)\n if varargs or kwargs:\n raise TypeError('variable length arguments for action not allowed.')\n if len(args) != len(defaults or ()):\n raise TypeError('not all arguments have proper definitions')\n\n for idx, (arg, definition) in enumerate(zip(args, defaults or ())):\n if arg.startswith('_'):\n raise TypeError('arguments may not start with an underscore')\n if not isinstance(definition, tuple):\n shortcut = None\n default = definition\n else:\n shortcut, default = definition\n argument_type = argument_types[type(default)]\n if isinstance(default, bool) and default is True:\n arg = 'no-' + arg\n arguments.append((arg.replace('_', '-'), shortcut,\n default, argument_type))\n return func, description, arguments\n\n\ndef make_shell(init_func=None, banner=None, use_ipython=True):\n \"\"\"Returns an action callback that spawns a new interactive\n python shell.\n\n :param init_func: an optional initialization function that is\n called before the shell is started. The return\n value of this function is the initial namespace.\n :param banner: the banner that is displayed before the shell. If\n not specified a generic banner is used instead.\n :param use_ipython: if set to `True` ipython is used if available.\n \"\"\"\n if banner is None:\n banner = 'Interactive Werkzeug Shell'\n if init_func is None:\n init_func = dict\n def action(ipython=use_ipython):\n \"\"\"Start a new interactive python session.\"\"\"\n namespace = init_func()\n if ipython:\n try:\n try:\n from IPython.frontend.terminal.embed import InteractiveShellEmbed\n sh = InteractiveShellEmbed(banner1=banner)\n except ImportError:\n from IPython.Shell import IPShellEmbed\n sh = IPShellEmbed(banner=banner)\n except ImportError:\n pass\n else:\n sh(global_ns={}, local_ns=namespace)\n return\n from code import interact\n interact(banner, local=namespace)\n return action\n\n\ndef make_runserver(app_factory, hostname='localhost', port=5000,\n use_reloader=False, use_debugger=False, use_evalex=True,\n threaded=False, processes=1, static_files=None,\n extra_files=None, ssl_context=None):\n \"\"\"Returns an action callback that spawns a new development server.\n\n .. 
versionadded:: 0.5\n `static_files` and `extra_files` was added.\n\n ..versionadded:: 0.6.1\n `ssl_context` was added.\n\n :param app_factory: a function that returns a new WSGI application.\n :param hostname: the default hostname the server should listen on.\n :param port: the default port of the server.\n :param use_reloader: the default setting for the reloader.\n :param use_evalex: the default setting for the evalex flag of the debugger.\n :param threaded: the default threading setting.\n :param processes: the default number of processes to start.\n :param static_files: optional dict of static files.\n :param extra_files: optional list of extra files to track for reloading.\n :param ssl_context: optional SSL context for running server in HTTPS mode.\n \"\"\"\n def action(hostname=('h', hostname), port=('p', port),\n reloader=use_reloader, debugger=use_debugger,\n evalex=use_evalex, threaded=threaded, processes=processes):\n \"\"\"Start a new development server.\"\"\"\n from werkzeug.serving import run_simple\n app = app_factory()\n run_simple(hostname, port, app,\n use_reloader=reloader, use_debugger=debugger,\n use_evalex=evalex, extra_files=extra_files,\n reloader_interval=1, threaded=threaded, processes=processes,\n static_files=static_files, ssl_context=ssl_context)\n return action\n", "path": "werkzeug/script.py"}]} | 3,988 | 175 |
gh_patches_debug_12462 | rasdani/github-patches | git_diff | dask__distributed-6306 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
parse_stdout.py produces malformed Junit reports
parse_stdout.py has recently stopped working.
This causes Windows timeouts to be reported as a white box instead of a red box in https://dask.org/distributed/test_report.html.
https://github.com/dask/distributed/actions/runs/2293197167
> Publish test results: artifacts/windows-latest-3.10-notci1/pytest.xml#L976: Error processing result file: not well-formed (invalid token): line 976, column 93
--- END ISSUE ---
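The "not well-formed (invalid token)" message usually means an XML-special character (for example `<`, `>`, `&`, or a quote) reached an attribute value unescaped. A minimal, standard-library-only sketch of that failure mode follows; the test name is hypothetical and not taken from the repository:

```python
import html
import xml.etree.ElementTree as ET

tname = "test_report[<Serialize: 123>]"      # hypothetical parametrized test id

bad = f'<testcase name="{tname}" />'
good = f'<testcase name="{html.escape(tname)}" />'

try:
    ET.fromstring(bad)
except ET.ParseError as exc:
    print("unescaped:", exc)                 # not well-formed (invalid token)

print(ET.fromstring(good).attrib["name"])    # parses and round-trips the name
```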
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `continuous_integration/scripts/parse_stdout.py`
Content:
```
1 """On Windows, pytest-timeout kills off the whole test suite, leaving no junit report
2 behind. Parse the stdout of pytest to generate one.
3 """
4 from __future__ import annotations
5
6 import re
7 import sys
8 from collections import Counter, defaultdict
9 from collections.abc import Iterable
10 from datetime import datetime
11
12 OUTCOMES = {
13 "PASSED",
14 "FAILED",
15 # Test timeout. Marked as a variant of FAILED in the junit report
16 None,
17 # Setup failed or teardown failed.
18 # In the latter case, if the test also failed, show both a FAILED and an ERROR line.
19 "ERROR",
20 # @pytest.mark.skip, @pytest.mark.skipif, or raise pytest.skip()
21 "SKIPPED",
22 # Reported as a variant of SKIPPED in the junit report
23 "XFAIL",
24 # These appear respectively before and after another status. Ignore.
25 "RERUN",
26 "LEAKED",
27 }
28
29
30 def parse_rows(rows: Iterable[str]) -> list[tuple[str, str, set[str | None]]]:
31 match = re.compile(
32 r"(distributed/.*test.*)::([^ ]*)"
33 r"( (.*)(PASSED|FAILED|ERROR|SKIPPED|XFAIL|RERUN|LEAKED).*| )$"
34 )
35
36 out: defaultdict[tuple[str, str], set[str | None]] = defaultdict(set)
37
38 for row in rows:
39 m = match.match(row)
40 if not m:
41 continue
42
43 fname = m.group(1)
44 clsname = fname.replace("/", ".").replace(".py", "").replace("::", ".")
45
46 tname = m.group(2).strip()
47 if m.group(4) and "]" in m.group(4):
48 tname += " " + m.group(4).split("]")[0] + "]"
49
50 outcome = m.group(5)
51 assert outcome in OUTCOMES
52 if outcome not in {"RERUN", "LEAKED"}:
53 out[clsname, tname].add(outcome)
54
55 return [(clsname, tname, outcomes) for (clsname, tname), outcomes in out.items()]
56
57
58 def build_xml(rows: list[tuple[str, str, set[str | None]]]) -> None:
59 cnt = Counter(outcome for _, _, outcomes in rows for outcome in outcomes)
60 timestamp = datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
61
62 # We could have used ElementTree but it feels like overkill here
63 print('<?xml version="1.0" encoding="utf-8"?>')
64 print("<testsuites>")
65 print(
66 '<testsuite name="distributed" '
67 f'errors="{cnt["ERROR"]}" failures="{cnt["FAILED"] + cnt[None]}" '
68 f'skipped="{cnt["SKIPPED"] + cnt["XFAIL"]}" tests="{sum(cnt.values())}" '
69 f'time="0.0" timestamp="{timestamp}" hostname="">'
70 )
71
72 for clsname, tname, outcomes in rows:
73 print(f'<testcase classname="{clsname}" name="{tname}" time="0.0"', end="")
74 if outcomes == {"PASSED"}:
75 print(" />")
76 elif outcomes == {"FAILED"}:
77 print('><failure message=""></failure></testcase>')
78 elif outcomes == {None}:
79 print('><failure message="pytest-timeout exceeded"></failure></testcase>')
80 elif outcomes == {"ERROR"}:
81 print('><error message="failed on setup"></error></testcase>')
82 elif outcomes == {"PASSED", "ERROR"}:
83 print('><error message="failed on teardown"></error></testcase>')
84 elif outcomes == {"FAILED", "ERROR"}:
85 print(
86 '><failure message=""></failure></testcase>\n'
87 f'<testcase classname="{clsname}" name="{tname}" time="0.0">'
88 '<error message="failed on teardown"></error></testcase>'
89 )
90 elif outcomes == {"SKIPPED"}:
91 print('><skipped type="pytest.skip" message="skip"></skipped></testcase>')
92 elif outcomes == {"XFAIL"}:
93 print('><skipped type="pytest.xfail" message="xfail"></skipped></testcase>')
94 else: # pragma: nocover
95 # This should be unreachable. We would normally raise ValueError, except
96 # that a crash in this script would be pretty much invisible.
97 print(
98 f' />\n<testcase classname="parse_stdout" name="build_xml" time="0.0">'
99 f'><failure message="Unexpected {outcomes=}"></failure></testcase>'
100 )
101
102 print("</testsuite>")
103 print("</testsuites>")
104
105
106 def main() -> None: # pragma: nocover
107 build_xml(parse_rows(sys.stdin))
108
109
110 if __name__ == "__main__":
111 main() # pragma: nocover
112
```
--- END FILES ---
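Worth noting when reading `build_xml` above: `clsname` and `tname` are interpolated directly into hand-written attribute strings, so a parametrized test id containing `<`, `>`, `&`, or quotes yields exactly the invalid-token error from the issue. Besides `html.escape`, the standard library's `xml.sax.saxutils.quoteattr` is another way to make such attributes safe; this sketch is shown only for comparison, not as the project's chosen fix:

```python
from xml.sax.saxutils import quoteattr

tname = "test_timeout[<WorkerState ...>]"    # hypothetical parametrized test id
print(f'<testcase classname="parse_stdout" name={quoteattr(tname)} time="0.0" />')
```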
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/continuous_integration/scripts/parse_stdout.py b/continuous_integration/scripts/parse_stdout.py
--- a/continuous_integration/scripts/parse_stdout.py
+++ b/continuous_integration/scripts/parse_stdout.py
@@ -3,6 +3,7 @@
"""
from __future__ import annotations
+import html
import re
import sys
from collections import Counter, defaultdict
@@ -70,6 +71,8 @@
)
for clsname, tname, outcomes in rows:
+ clsname = html.escape(clsname)
+ tname = html.escape(tname)
print(f'<testcase classname="{clsname}" name="{tname}" time="0.0"', end="")
if outcomes == {"PASSED"}:
print(" />")
| {"golden_diff": "diff --git a/continuous_integration/scripts/parse_stdout.py b/continuous_integration/scripts/parse_stdout.py\n--- a/continuous_integration/scripts/parse_stdout.py\n+++ b/continuous_integration/scripts/parse_stdout.py\n@@ -3,6 +3,7 @@\n \"\"\"\n from __future__ import annotations\n \n+import html\n import re\n import sys\n from collections import Counter, defaultdict\n@@ -70,6 +71,8 @@\n )\n \n for clsname, tname, outcomes in rows:\n+ clsname = html.escape(clsname)\n+ tname = html.escape(tname)\n print(f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\"', end=\"\")\n if outcomes == {\"PASSED\"}:\n print(\" />\")\n", "issue": "parse_stdout.py produces malformed Junit reports\nparse_stdout.py has recently stopped working.\r\nThis causes Windows timeouts to be reported as a white box instead of a red box in https://dask.org/distributed/test_report.html.\r\n\r\nhttps://github.com/dask/distributed/actions/runs/2293197167\r\n> Publish test results:\u00a0artifacts/windows-latest-3.10-notci1/pytest.xml#L976Error processing result file: not well-formed (invalid token): line 976, column 93\r\n\n", "before_files": [{"content": "\"\"\"On Windows, pytest-timeout kills off the whole test suite, leaving no junit report\nbehind. Parse the stdout of pytest to generate one.\n\"\"\"\nfrom __future__ import annotations\n\nimport re\nimport sys\nfrom collections import Counter, defaultdict\nfrom collections.abc import Iterable\nfrom datetime import datetime\n\nOUTCOMES = {\n \"PASSED\",\n \"FAILED\",\n # Test timeout. Marked as a variant of FAILED in the junit report\n None,\n # Setup failed or teardown failed.\n # In the latter case, if the test also failed, show both a FAILED and an ERROR line.\n \"ERROR\",\n # @pytest.mark.skip, @pytest.mark.skipif, or raise pytest.skip()\n \"SKIPPED\",\n # Reported as a variant of SKIPPED in the junit report\n \"XFAIL\",\n # These appear respectively before and after another status. 
Ignore.\n \"RERUN\",\n \"LEAKED\",\n}\n\n\ndef parse_rows(rows: Iterable[str]) -> list[tuple[str, str, set[str | None]]]:\n match = re.compile(\n r\"(distributed/.*test.*)::([^ ]*)\"\n r\"( (.*)(PASSED|FAILED|ERROR|SKIPPED|XFAIL|RERUN|LEAKED).*| )$\"\n )\n\n out: defaultdict[tuple[str, str], set[str | None]] = defaultdict(set)\n\n for row in rows:\n m = match.match(row)\n if not m:\n continue\n\n fname = m.group(1)\n clsname = fname.replace(\"/\", \".\").replace(\".py\", \"\").replace(\"::\", \".\")\n\n tname = m.group(2).strip()\n if m.group(4) and \"]\" in m.group(4):\n tname += \" \" + m.group(4).split(\"]\")[0] + \"]\"\n\n outcome = m.group(5)\n assert outcome in OUTCOMES\n if outcome not in {\"RERUN\", \"LEAKED\"}:\n out[clsname, tname].add(outcome)\n\n return [(clsname, tname, outcomes) for (clsname, tname), outcomes in out.items()]\n\n\ndef build_xml(rows: list[tuple[str, str, set[str | None]]]) -> None:\n cnt = Counter(outcome for _, _, outcomes in rows for outcome in outcomes)\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n # We could have used ElementTree but it feels like overkill here\n print('<?xml version=\"1.0\" encoding=\"utf-8\"?>')\n print(\"<testsuites>\")\n print(\n '<testsuite name=\"distributed\" '\n f'errors=\"{cnt[\"ERROR\"]}\" failures=\"{cnt[\"FAILED\"] + cnt[None]}\" '\n f'skipped=\"{cnt[\"SKIPPED\"] + cnt[\"XFAIL\"]}\" tests=\"{sum(cnt.values())}\" '\n f'time=\"0.0\" timestamp=\"{timestamp}\" hostname=\"\">'\n )\n\n for clsname, tname, outcomes in rows:\n print(f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\"', end=\"\")\n if outcomes == {\"PASSED\"}:\n print(\" />\")\n elif outcomes == {\"FAILED\"}:\n print('><failure message=\"\"></failure></testcase>')\n elif outcomes == {None}:\n print('><failure message=\"pytest-timeout exceeded\"></failure></testcase>')\n elif outcomes == {\"ERROR\"}:\n print('><error message=\"failed on setup\"></error></testcase>')\n elif outcomes == {\"PASSED\", \"ERROR\"}:\n print('><error message=\"failed on teardown\"></error></testcase>')\n elif outcomes == {\"FAILED\", \"ERROR\"}:\n print(\n '><failure message=\"\"></failure></testcase>\\n'\n f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\">'\n '<error message=\"failed on teardown\"></error></testcase>'\n )\n elif outcomes == {\"SKIPPED\"}:\n print('><skipped type=\"pytest.skip\" message=\"skip\"></skipped></testcase>')\n elif outcomes == {\"XFAIL\"}:\n print('><skipped type=\"pytest.xfail\" message=\"xfail\"></skipped></testcase>')\n else: # pragma: nocover\n # This should be unreachable. We would normally raise ValueError, except\n # that a crash in this script would be pretty much invisible.\n print(\n f' />\\n<testcase classname=\"parse_stdout\" name=\"build_xml\" time=\"0.0\">'\n f'><failure message=\"Unexpected {outcomes=}\"></failure></testcase>'\n )\n\n print(\"</testsuite>\")\n print(\"</testsuites>\")\n\n\ndef main() -> None: # pragma: nocover\n build_xml(parse_rows(sys.stdin))\n\n\nif __name__ == \"__main__\":\n main() # pragma: nocover\n", "path": "continuous_integration/scripts/parse_stdout.py"}], "after_files": [{"content": "\"\"\"On Windows, pytest-timeout kills off the whole test suite, leaving no junit report\nbehind. 
Parse the stdout of pytest to generate one.\n\"\"\"\nfrom __future__ import annotations\n\nimport html\nimport re\nimport sys\nfrom collections import Counter, defaultdict\nfrom collections.abc import Iterable\nfrom datetime import datetime\n\nOUTCOMES = {\n \"PASSED\",\n \"FAILED\",\n # Test timeout. Marked as a variant of FAILED in the junit report\n None,\n # Setup failed or teardown failed.\n # In the latter case, if the test also failed, show both a FAILED and an ERROR line.\n \"ERROR\",\n # @pytest.mark.skip, @pytest.mark.skipif, or raise pytest.skip()\n \"SKIPPED\",\n # Reported as a variant of SKIPPED in the junit report\n \"XFAIL\",\n # These appear respectively before and after another status. Ignore.\n \"RERUN\",\n \"LEAKED\",\n}\n\n\ndef parse_rows(rows: Iterable[str]) -> list[tuple[str, str, set[str | None]]]:\n match = re.compile(\n r\"(distributed/.*test.*)::([^ ]*)\"\n r\"( (.*)(PASSED|FAILED|ERROR|SKIPPED|XFAIL|RERUN|LEAKED).*| )$\"\n )\n\n out: defaultdict[tuple[str, str], set[str | None]] = defaultdict(set)\n\n for row in rows:\n m = match.match(row)\n if not m:\n continue\n\n fname = m.group(1)\n clsname = fname.replace(\"/\", \".\").replace(\".py\", \"\").replace(\"::\", \".\")\n\n tname = m.group(2).strip()\n if m.group(4) and \"]\" in m.group(4):\n tname += \" \" + m.group(4).split(\"]\")[0] + \"]\"\n\n outcome = m.group(5)\n assert outcome in OUTCOMES\n if outcome not in {\"RERUN\", \"LEAKED\"}:\n out[clsname, tname].add(outcome)\n\n return [(clsname, tname, outcomes) for (clsname, tname), outcomes in out.items()]\n\n\ndef build_xml(rows: list[tuple[str, str, set[str | None]]]) -> None:\n cnt = Counter(outcome for _, _, outcomes in rows for outcome in outcomes)\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%f\")\n\n # We could have used ElementTree but it feels like overkill here\n print('<?xml version=\"1.0\" encoding=\"utf-8\"?>')\n print(\"<testsuites>\")\n print(\n '<testsuite name=\"distributed\" '\n f'errors=\"{cnt[\"ERROR\"]}\" failures=\"{cnt[\"FAILED\"] + cnt[None]}\" '\n f'skipped=\"{cnt[\"SKIPPED\"] + cnt[\"XFAIL\"]}\" tests=\"{sum(cnt.values())}\" '\n f'time=\"0.0\" timestamp=\"{timestamp}\" hostname=\"\">'\n )\n\n for clsname, tname, outcomes in rows:\n clsname = html.escape(clsname)\n tname = html.escape(tname)\n print(f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\"', end=\"\")\n if outcomes == {\"PASSED\"}:\n print(\" />\")\n elif outcomes == {\"FAILED\"}:\n print('><failure message=\"\"></failure></testcase>')\n elif outcomes == {None}:\n print('><failure message=\"pytest-timeout exceeded\"></failure></testcase>')\n elif outcomes == {\"ERROR\"}:\n print('><error message=\"failed on setup\"></error></testcase>')\n elif outcomes == {\"PASSED\", \"ERROR\"}:\n print('><error message=\"failed on teardown\"></error></testcase>')\n elif outcomes == {\"FAILED\", \"ERROR\"}:\n print(\n '><failure message=\"\"></failure></testcase>\\n'\n f'<testcase classname=\"{clsname}\" name=\"{tname}\" time=\"0.0\">'\n '<error message=\"failed on teardown\"></error></testcase>'\n )\n elif outcomes == {\"SKIPPED\"}:\n print('><skipped type=\"pytest.skip\" message=\"skip\"></skipped></testcase>')\n elif outcomes == {\"XFAIL\"}:\n print('><skipped type=\"pytest.xfail\" message=\"xfail\"></skipped></testcase>')\n else: # pragma: nocover\n # This should be unreachable. 
We would normally raise ValueError, except\n # that a crash in this script would be pretty much invisible.\n print(\n f' />\\n<testcase classname=\"parse_stdout\" name=\"build_xml\" time=\"0.0\">'\n f'><failure message=\"Unexpected {outcomes=}\"></failure></testcase>'\n )\n\n print(\"</testsuite>\")\n print(\"</testsuites>\")\n\n\ndef main() -> None: # pragma: nocover\n build_xml(parse_rows(sys.stdin))\n\n\nif __name__ == \"__main__\":\n main() # pragma: nocover\n", "path": "continuous_integration/scripts/parse_stdout.py"}]} | 1,680 | 165 |
gh_patches_debug_60692 | rasdani/github-patches | git_diff | biolab__orange3-text-353 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concordance: Index Error
<!--
This is an issue template. Please fill in the relevant details in the
sections below.
-->
##### Text version
<!-- From menu _Options→Add-ons→Orange3-Text_ or code `orangecontrib.text.version.full_version` -->
0.3.0
##### Orange version
<!-- From menu _Help→About→Version_ or code `Orange.version.full_version` -->
3.15.dev
##### Expected behavior
No crash.
##### Actual behavior
Concordance crashes when a document is selected and the user removes the query.
##### Steps to reproduce the behavior
Corpus (book-excerpts) - query for a word - select a document from the visualization - remove the word (delete a character).
##### Additional info (worksheets, data, screenshots, ...)
--- END ISSUE ---
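The crash pattern here is a cached selection outliving the data it indexes: deleting the query rebuilds (or empties) the concordance index while the stored row numbers still refer to the old one. An Orange-free sketch of that pattern; all names are illustrative:

```python
word_index = [(0, 5), (0, 9), (2, 3)]    # rows computed for the old query
selected_rows = [2]                      # the row the user clicked

word_index = []                          # query deleted -> index rebuilt empty
try:
    docs = [word_index[row][0] for row in selected_rows]
except IndexError as exc:
    print("stale selection:", exc)       # list index out of range
```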
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `orangecontrib/text/widgets/owconcordance.py`
Content:
```
1 from typing import Optional
2
3 from itertools import chain
4 from AnyQt.QtCore import Qt, QAbstractTableModel, QSize, QItemSelectionModel, \
5 QItemSelection, QModelIndex
6 from AnyQt.QtWidgets import QSizePolicy, QApplication, QTableView, \
7 QStyledItemDelegate
8 from AnyQt.QtGui import QColor
9
10 from Orange.widgets import gui
11 from Orange.widgets.settings import Setting, ContextSetting, PerfectDomainContextHandler
12 from Orange.widgets.widget import OWWidget, Msg, Input, Output
13 from nltk import ConcordanceIndex
14 from orangecontrib.text.corpus import Corpus
15 from orangecontrib.text.topics import Topic
16 from orangecontrib.text.preprocess import WordPunctTokenizer
17
18
19 class HorizontalGridDelegate(QStyledItemDelegate):
20 """Class for setting elide."""
21
22 def paint(self, painter, option, index):
23 if index.column() == 0:
24 option.textElideMode = Qt.ElideLeft
25 elif index.column() == 2:
26 option.textElideMode = Qt.ElideRight
27 QStyledItemDelegate.paint(self, painter, option, index)
28
29
30 class DocumentSelectionModel(QItemSelectionModel):
31 """Sets selection for QTableView. Creates a set of selected documents."""
32
33 def select(self, selection, flags):
34 # which rows have been selected
35 indexes = selection.indexes() if isinstance(selection, QItemSelection) \
36 else [selection]
37 # prevent crashing when deleting the connection
38 if not indexes:
39 super().select(selection, flags)
40 return
41 # indexes[0].row() == -1 indicates clicking outside of the table
42 if len(indexes) == 1 and indexes[0].row() == -1:
43 self.clear()
44 return
45 word_index = self.model().word_index
46 selected_docs = {word_index[index.row()][0] for index in indexes}
47 selected_rows = [
48 row_index for row_index, (doc_index, _) in enumerate(word_index)
49 if doc_index in selected_docs]
50 selection = QItemSelection()
51 # select all rows belonging to the selected document
52 for row in selected_rows:
53 index = self.model().index(row, 0)
54 selection.select(index, index)
55 super().select(selection, flags)
56
57
58 class ConcordanceModel(QAbstractTableModel):
59 """A model for constructing concordances from text."""
60
61 def __init__(self):
62 QAbstractTableModel.__init__(self)
63 self.word = None
64 self.corpus = None
65 self.tokens = None
66 self.n_tokens = None
67 self.n_types = None
68 self.indices = None
69 self.word_index = None
70 self.width = 8
71 self.colored_rows = None
72
73 def set_word(self, word):
74 self.modelAboutToBeReset.emit()
75 self.word = word
76 self._compute_word_index()
77 self.modelReset.emit()
78
79 def set_corpus(self, corpus):
80 self.modelAboutToBeReset.emit()
81 self.corpus = corpus
82 self.set_tokens()
83 self._compute_indices()
84 self._compute_word_index()
85 self.modelReset.emit()
86
87 def set_tokens(self):
88 if self.corpus is None:
89 self.tokens = None
90 return
91 tokenizer = WordPunctTokenizer()
92 self.tokens = tokenizer(self.corpus.documents)
93 self.n_tokens = sum(map(len, self.tokens))
94 self.n_types = len(set(chain.from_iterable(self.tokens)))
95
96 def set_width(self, width):
97 self.modelAboutToBeReset.emit()
98 self.width = width
99 self.modelReset.emit()
100
101 def flags(self, _):
102 return Qt.ItemIsEnabled | Qt.ItemIsSelectable
103
104 def rowCount(self, parent=QModelIndex(), *args, **kwargs):
105 return 0 if parent.isValid() or self.word_index is None else len(self.word_index)
106
107 def columnCount(self, parent=None, *args, **kwargs):
108 return 3
109
110 def data(self, index, role=Qt.DisplayRole):
111 row, col = index.row(), index.column()
112 doc, index = self.word_index[row]
113
114 if role == Qt.DisplayRole:
115 tokens = self.tokens
116 if col == 0:
117 return ' '.join(tokens[doc][max(index - self.width, 0):index])
118 if col == 1:
119 return tokens[doc][index]
120 if col == 2:
121 return ' '.join(tokens[doc][index + 1:index + self.width + 1])
122
123 elif role == Qt.TextAlignmentRole:
124 return [Qt.AlignRight | Qt.AlignVCenter,
125 Qt.AlignCenter,
126 Qt.AlignLeft | Qt.AlignVCenter][col]
127
128 elif role == Qt.BackgroundRole:
129 const = self.word_index[row][0] in self.colored_rows
130 return QColor(236 + 19 * const, 243 + 12 * const, 255)
131
132 def _compute_indices(self): # type: () -> Optional[None, list]
133 if self.corpus is None:
134 self.indices = None
135 return
136 self.indices = [ConcordanceIndex(doc, key=lambda x: x.lower())
137 for doc in self.tokens]
138
139 def _compute_word_index(self):
140 if self.indices is None or self.word is None:
141 self.word_index = self.colored_rows = None
142 else:
143 self.word_index = [
144 (doc_idx, offset) for doc_idx, doc in enumerate(self.indices)
145 for offset in doc.offsets(self.word)]
146 self.colored_rows = set(sorted({d[0] for d in self.word_index})[::2])
147
148 def matching_docs(self):
149 if self.indices and self.word:
150 return sum(bool(doc.offsets(self.word)) for doc in self.indices)
151 else:
152 return 0
153
154
155 class OWConcordance(OWWidget):
156 name = "Concordance"
157 description = "Display the context of the word."
158 icon = "icons/Concordance.svg"
159 priority = 520
160
161 class Inputs:
162 corpus = Input("Corpus", Corpus)
163 query_word = Input("Query Word", Topic)
164
165 class Outputs:
166 selected_documents = Output("Selected Documents", Corpus)
167
168 settingsHandler = PerfectDomainContextHandler(
169 match_values = PerfectDomainContextHandler.MATCH_VALUES_ALL
170 )
171 autocommit = Setting(True)
172 context_width = Setting(5)
173 word = ContextSetting("", exclude_metas=False)
174 selected_rows = Setting([], schema_only=True)
175
176 class Warning(OWWidget.Warning):
177 multiple_words_on_input = Msg("Multiple query words on input. "
178 "Only the first one is considered!")
179
180 def __init__(self):
181 super().__init__()
182
183 self.corpus = None # Corpus
184 self.n_matching = '' # Info on docs matching the word
185 self.n_tokens = '' # Info on tokens
186 self.n_types = '' # Info on types (unique tokens)
187 self.is_word_on_input = False
188
189 # Info attributes
190 info_box = gui.widgetBox(self.controlArea, 'Info')
191 gui.label(info_box, self, 'Tokens: %(n_tokens)s')
192 gui.label(info_box, self, 'Types: %(n_types)s')
193 gui.label(info_box, self, 'Matching: %(n_matching)s')
194
195 # Width parameter
196 gui.spin(self.controlArea, self, 'context_width', 3, 10, box=True,
197 label="Number of words:", callback=self.set_width)
198
199 gui.rubber(self.controlArea)
200
201 # Search
202 c_box = gui.widgetBox(self.mainArea, orientation="vertical")
203 self.input = gui.lineEdit(
204 c_box, self, 'word', orientation=Qt.Horizontal,
205 sizePolicy=QSizePolicy(QSizePolicy.MinimumExpanding,
206 QSizePolicy.Fixed),
207 label='Query:', callback=self.set_word, callbackOnType=True)
208 self.input.setFocus()
209
210 # Concordances view
211 self.conc_view = QTableView()
212 self.model = ConcordanceModel()
213 self.conc_view.setModel(self.model)
214 self.conc_view.setWordWrap(False)
215 self.conc_view.setSelectionBehavior(QTableView.SelectRows)
216 self.conc_view.setSelectionModel(DocumentSelectionModel(self.model))
217 self.conc_view.setItemDelegate(HorizontalGridDelegate())
218 self.conc_view.selectionModel().selectionChanged.connect(self.selection_changed)
219 self.conc_view.horizontalHeader().hide()
220 self.conc_view.setShowGrid(False)
221 self.mainArea.layout().addWidget(self.conc_view)
222 self.set_width()
223
224 # Auto-commit box
225 gui.auto_commit(self.controlArea, self, 'autocommit', 'Commit',
226 'Auto commit is on')
227
228 def sizeHint(self): # pragma: no cover
229 return QSize(600, 400)
230
231 def set_width(self):
232 sel = self.conc_view.selectionModel().selection()
233 self.model.set_width(self.context_width)
234 if sel:
235 self.conc_view.selectionModel().select(sel,
236 QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)
237
238 def selection_changed(self):
239 selection = self.conc_view.selectionModel().selection()
240 self.selected_rows = sorted(set(cell.row() for cell in selection.indexes()))
241 self.commit()
242
243 def set_selection(self, selection):
244 if selection:
245 sel = QItemSelection()
246 for row in selection:
247 index = self.conc_view.model().index(row, 0)
248 sel.select(index, index)
249 self.conc_view.selectionModel().select(sel,
250 QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)
251
252 @Inputs.corpus
253 def set_corpus(self, data=None):
254 self.closeContext()
255 self.corpus = data
256 if data is None: # data removed, clear selection
257 self.selected_rows = []
258
259 if not self.is_word_on_input:
260 self.word = ""
261 self.openContext(self.corpus)
262
263 self.model.set_corpus(self.corpus)
264 self.set_word()
265
266 @Inputs.query_word
267 def set_word_from_input(self, topic):
268 self.Warning.multiple_words_on_input.clear()
269 if self.is_word_on_input: # word changed, clear selection
270 self.selected_rows = []
271 self.is_word_on_input = topic is not None and len(topic) > 0
272 self.input.setEnabled(not self.is_word_on_input)
273 if self.is_word_on_input:
274 if len(topic) > 1:
275 self.Warning.multiple_words_on_input()
276 self.word = topic.metas[0, 0]
277 self.set_word()
278
279 def set_word(self):
280 self.model.set_word(self.word)
281 self.update_widget()
282 self.commit()
283
284 def handleNewSignals(self):
285 self.set_selection(self.selected_rows)
286
287 def resize_columns(self):
288 col_width = (self.conc_view.width() -
289 self.conc_view.columnWidth(1)) / 2 - 12
290 self.conc_view.setColumnWidth(0, col_width)
291 self.conc_view.setColumnWidth(2, col_width)
292
293 def resizeEvent(self, event): # pragma: no cover
294 super().resizeEvent(event)
295 self.resize_columns()
296
297 def update_widget(self):
298 self.conc_view.resizeColumnToContents(1)
299 self.resize_columns()
300 self.conc_view.resizeRowsToContents()
301
302 if self.corpus is not None:
303 self.n_matching = '{}/{}'.format(
304 self.model.matching_docs() if self.word else 0,
305 len(self.corpus))
306 self.n_tokens = self.model.n_tokens
307 self.n_types = self.model.n_types
308 else:
309 self.n_matching = ''
310 self.n_tokens = ''
311 self.n_types = ''
312
313 def commit(self):
314 selected_docs = sorted(set(self.model.word_index[row][0]
315 for row in self.selected_rows))
316 if selected_docs:
317 selected = self.corpus[selected_docs]
318 self.Outputs.selected_documents.send(selected)
319 else:
320 self.Outputs.selected_documents.send(None)
321
322 def send_report(self):
323 view = self.conc_view
324 model = self.conc_view.model()
325 self.report_items("Concordances", (
326 ("Query", model.word),
327 ("Tokens", model.n_tokens),
328 ("Types", model.n_types),
329 ("Matching", self.n_matching),
330 ))
331 self.report_table(view)
332
333
334 if __name__ == '__main__': # pragma: no cover
335 app = QApplication([])
336 widget = OWConcordance()
337 corpus = Corpus.from_file('book-excerpts')
338 corpus = corpus[:3]
339 widget.set_corpus(corpus)
340 widget.show()
341 app.exec()
342
343
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/orangecontrib/text/widgets/owconcordance.py b/orangecontrib/text/widgets/owconcordance.py
--- a/orangecontrib/text/widgets/owconcordance.py
+++ b/orangecontrib/text/widgets/owconcordance.py
@@ -277,6 +277,7 @@
self.set_word()
def set_word(self):
+ self.selected_rows = []
self.model.set_word(self.word)
self.update_widget()
self.commit()
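The one added line clears the cached selection every time the query is (re)set, which also covers the word-on-input and corpus paths since both funnel through `set_word`. A plain-Python stand-in (not the widget itself) showing the effect:

```python
class ConcordanceLike:
    def __init__(self):
        self.word_index = [(0, 5), (0, 9)]   # rows for the current query
        self.selected_rows = [1]             # user's selection

    def set_word(self, word):
        self.selected_rows = []              # the line added by the patch
        self.word_index = [(0, 5), (0, 9)] if word else []

    def commit(self):
        return sorted({self.word_index[r][0] for r in self.selected_rows})

w = ConcordanceLike()
w.set_word("")        # user deletes the query
print(w.commit())     # [] instead of an IndexError
```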
| {"golden_diff": "diff --git a/orangecontrib/text/widgets/owconcordance.py b/orangecontrib/text/widgets/owconcordance.py\n--- a/orangecontrib/text/widgets/owconcordance.py\n+++ b/orangecontrib/text/widgets/owconcordance.py\n@@ -277,6 +277,7 @@\n self.set_word()\n \n def set_word(self):\n+ self.selected_rows = []\n self.model.set_word(self.word)\n self.update_widget()\n self.commit()\n", "issue": "Concordance: Index Error\n<!--\r\nThis is an issue template. Please fill in the relevant details in the\r\nsections below.\r\n-->\r\n\r\n##### Text version\r\n<!-- From menu _Options\u2192Add-ons\u2192Orange3-Text_ or code `orangecontrib.text.version.full_version` -->\r\n0.3.0\r\n\r\n##### Orange version\r\n<!-- From menu _Help\u2192About\u2192Version_ or code `Orange.version.full_version` -->\r\n3.15.dev\r\n\r\n##### Expected behavior\r\nNo crash.\r\n\r\n\r\n##### Actual behavior\r\nConcordance crashes when a document is selected and the user removes query.\r\n\r\n\r\n##### Steps to reproduce the behavior\r\nCorpus (bookexcertps) - query for word - select a document from the visualization - remove the word (delete a character).\r\n\r\n\r\n##### Additional info (worksheets, data, screenshots, ...)\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Optional\n\nfrom itertools import chain\nfrom AnyQt.QtCore import Qt, QAbstractTableModel, QSize, QItemSelectionModel, \\\n QItemSelection, QModelIndex\nfrom AnyQt.QtWidgets import QSizePolicy, QApplication, QTableView, \\\n QStyledItemDelegate\nfrom AnyQt.QtGui import QColor\n\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting, ContextSetting, PerfectDomainContextHandler\nfrom Orange.widgets.widget import OWWidget, Msg, Input, Output\nfrom nltk import ConcordanceIndex\nfrom orangecontrib.text.corpus import Corpus\nfrom orangecontrib.text.topics import Topic\nfrom orangecontrib.text.preprocess import WordPunctTokenizer\n\n\nclass HorizontalGridDelegate(QStyledItemDelegate):\n \"\"\"Class for setting elide.\"\"\"\n\n def paint(self, painter, option, index):\n if index.column() == 0:\n option.textElideMode = Qt.ElideLeft\n elif index.column() == 2:\n option.textElideMode = Qt.ElideRight\n QStyledItemDelegate.paint(self, painter, option, index)\n\n\nclass DocumentSelectionModel(QItemSelectionModel):\n \"\"\"Sets selection for QTableView. 
Creates a set of selected documents.\"\"\"\n\n def select(self, selection, flags):\n # which rows have been selected\n indexes = selection.indexes() if isinstance(selection, QItemSelection) \\\n else [selection]\n # prevent crashing when deleting the connection\n if not indexes:\n super().select(selection, flags)\n return\n # indexes[0].row() == -1 indicates clicking outside of the table\n if len(indexes) == 1 and indexes[0].row() == -1:\n self.clear()\n return\n word_index = self.model().word_index\n selected_docs = {word_index[index.row()][0] for index in indexes}\n selected_rows = [\n row_index for row_index, (doc_index, _) in enumerate(word_index)\n if doc_index in selected_docs]\n selection = QItemSelection()\n # select all rows belonging to the selected document\n for row in selected_rows:\n index = self.model().index(row, 0)\n selection.select(index, index)\n super().select(selection, flags)\n\n\nclass ConcordanceModel(QAbstractTableModel):\n \"\"\"A model for constructing concordances from text.\"\"\"\n\n def __init__(self):\n QAbstractTableModel.__init__(self)\n self.word = None\n self.corpus = None\n self.tokens = None\n self.n_tokens = None\n self.n_types = None\n self.indices = None\n self.word_index = None\n self.width = 8\n self.colored_rows = None\n\n def set_word(self, word):\n self.modelAboutToBeReset.emit()\n self.word = word\n self._compute_word_index()\n self.modelReset.emit()\n\n def set_corpus(self, corpus):\n self.modelAboutToBeReset.emit()\n self.corpus = corpus\n self.set_tokens()\n self._compute_indices()\n self._compute_word_index()\n self.modelReset.emit()\n\n def set_tokens(self):\n if self.corpus is None:\n self.tokens = None\n return\n tokenizer = WordPunctTokenizer()\n self.tokens = tokenizer(self.corpus.documents)\n self.n_tokens = sum(map(len, self.tokens))\n self.n_types = len(set(chain.from_iterable(self.tokens)))\n\n def set_width(self, width):\n self.modelAboutToBeReset.emit()\n self.width = width\n self.modelReset.emit()\n\n def flags(self, _):\n return Qt.ItemIsEnabled | Qt.ItemIsSelectable\n\n def rowCount(self, parent=QModelIndex(), *args, **kwargs):\n return 0 if parent.isValid() or self.word_index is None else len(self.word_index)\n\n def columnCount(self, parent=None, *args, **kwargs):\n return 3\n\n def data(self, index, role=Qt.DisplayRole):\n row, col = index.row(), index.column()\n doc, index = self.word_index[row]\n\n if role == Qt.DisplayRole:\n tokens = self.tokens\n if col == 0:\n return ' '.join(tokens[doc][max(index - self.width, 0):index])\n if col == 1:\n return tokens[doc][index]\n if col == 2:\n return ' '.join(tokens[doc][index + 1:index + self.width + 1])\n\n elif role == Qt.TextAlignmentRole:\n return [Qt.AlignRight | Qt.AlignVCenter,\n Qt.AlignCenter,\n Qt.AlignLeft | Qt.AlignVCenter][col]\n\n elif role == Qt.BackgroundRole:\n const = self.word_index[row][0] in self.colored_rows\n return QColor(236 + 19 * const, 243 + 12 * const, 255)\n\n def _compute_indices(self): # type: () -> Optional[None, list]\n if self.corpus is None:\n self.indices = None\n return\n self.indices = [ConcordanceIndex(doc, key=lambda x: x.lower())\n for doc in self.tokens]\n\n def _compute_word_index(self):\n if self.indices is None or self.word is None:\n self.word_index = self.colored_rows = None\n else:\n self.word_index = [\n (doc_idx, offset) for doc_idx, doc in enumerate(self.indices)\n for offset in doc.offsets(self.word)]\n self.colored_rows = set(sorted({d[0] for d in self.word_index})[::2])\n\n def matching_docs(self):\n if self.indices and 
self.word:\n return sum(bool(doc.offsets(self.word)) for doc in self.indices)\n else:\n return 0\n\n\nclass OWConcordance(OWWidget):\n name = \"Concordance\"\n description = \"Display the context of the word.\"\n icon = \"icons/Concordance.svg\"\n priority = 520\n\n class Inputs:\n corpus = Input(\"Corpus\", Corpus)\n query_word = Input(\"Query Word\", Topic)\n\n class Outputs:\n selected_documents = Output(\"Selected Documents\", Corpus)\n\n settingsHandler = PerfectDomainContextHandler(\n match_values = PerfectDomainContextHandler.MATCH_VALUES_ALL\n )\n autocommit = Setting(True)\n context_width = Setting(5)\n word = ContextSetting(\"\", exclude_metas=False)\n selected_rows = Setting([], schema_only=True)\n\n class Warning(OWWidget.Warning):\n multiple_words_on_input = Msg(\"Multiple query words on input. \"\n \"Only the first one is considered!\")\n\n def __init__(self):\n super().__init__()\n\n self.corpus = None # Corpus\n self.n_matching = '' # Info on docs matching the word\n self.n_tokens = '' # Info on tokens\n self.n_types = '' # Info on types (unique tokens)\n self.is_word_on_input = False\n\n # Info attributes\n info_box = gui.widgetBox(self.controlArea, 'Info')\n gui.label(info_box, self, 'Tokens: %(n_tokens)s')\n gui.label(info_box, self, 'Types: %(n_types)s')\n gui.label(info_box, self, 'Matching: %(n_matching)s')\n\n # Width parameter\n gui.spin(self.controlArea, self, 'context_width', 3, 10, box=True,\n label=\"Number of words:\", callback=self.set_width)\n\n gui.rubber(self.controlArea)\n\n # Search\n c_box = gui.widgetBox(self.mainArea, orientation=\"vertical\")\n self.input = gui.lineEdit(\n c_box, self, 'word', orientation=Qt.Horizontal,\n sizePolicy=QSizePolicy(QSizePolicy.MinimumExpanding,\n QSizePolicy.Fixed),\n label='Query:', callback=self.set_word, callbackOnType=True)\n self.input.setFocus()\n\n # Concordances view\n self.conc_view = QTableView()\n self.model = ConcordanceModel()\n self.conc_view.setModel(self.model)\n self.conc_view.setWordWrap(False)\n self.conc_view.setSelectionBehavior(QTableView.SelectRows)\n self.conc_view.setSelectionModel(DocumentSelectionModel(self.model))\n self.conc_view.setItemDelegate(HorizontalGridDelegate())\n self.conc_view.selectionModel().selectionChanged.connect(self.selection_changed)\n self.conc_view.horizontalHeader().hide()\n self.conc_view.setShowGrid(False)\n self.mainArea.layout().addWidget(self.conc_view)\n self.set_width()\n\n # Auto-commit box\n gui.auto_commit(self.controlArea, self, 'autocommit', 'Commit',\n 'Auto commit is on')\n\n def sizeHint(self): # pragma: no cover\n return QSize(600, 400)\n\n def set_width(self):\n sel = self.conc_view.selectionModel().selection()\n self.model.set_width(self.context_width)\n if sel:\n self.conc_view.selectionModel().select(sel,\n QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)\n\n def selection_changed(self):\n selection = self.conc_view.selectionModel().selection()\n self.selected_rows = sorted(set(cell.row() for cell in selection.indexes()))\n self.commit()\n\n def set_selection(self, selection):\n if selection:\n sel = QItemSelection()\n for row in selection:\n index = self.conc_view.model().index(row, 0)\n sel.select(index, index)\n self.conc_view.selectionModel().select(sel,\n QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)\n\n @Inputs.corpus\n def set_corpus(self, data=None):\n self.closeContext()\n self.corpus = data\n if data is None: # data removed, clear selection\n self.selected_rows = []\n\n if not self.is_word_on_input:\n self.word = 
\"\"\n self.openContext(self.corpus)\n\n self.model.set_corpus(self.corpus)\n self.set_word()\n\n @Inputs.query_word\n def set_word_from_input(self, topic):\n self.Warning.multiple_words_on_input.clear()\n if self.is_word_on_input: # word changed, clear selection\n self.selected_rows = []\n self.is_word_on_input = topic is not None and len(topic) > 0\n self.input.setEnabled(not self.is_word_on_input)\n if self.is_word_on_input:\n if len(topic) > 1:\n self.Warning.multiple_words_on_input()\n self.word = topic.metas[0, 0]\n self.set_word()\n\n def set_word(self):\n self.model.set_word(self.word)\n self.update_widget()\n self.commit()\n\n def handleNewSignals(self):\n self.set_selection(self.selected_rows)\n\n def resize_columns(self):\n col_width = (self.conc_view.width() -\n self.conc_view.columnWidth(1)) / 2 - 12\n self.conc_view.setColumnWidth(0, col_width)\n self.conc_view.setColumnWidth(2, col_width)\n\n def resizeEvent(self, event): # pragma: no cover\n super().resizeEvent(event)\n self.resize_columns()\n\n def update_widget(self):\n self.conc_view.resizeColumnToContents(1)\n self.resize_columns()\n self.conc_view.resizeRowsToContents()\n\n if self.corpus is not None:\n self.n_matching = '{}/{}'.format(\n self.model.matching_docs() if self.word else 0,\n len(self.corpus))\n self.n_tokens = self.model.n_tokens\n self.n_types = self.model.n_types\n else:\n self.n_matching = ''\n self.n_tokens = ''\n self.n_types = ''\n\n def commit(self):\n selected_docs = sorted(set(self.model.word_index[row][0]\n for row in self.selected_rows))\n if selected_docs:\n selected = self.corpus[selected_docs]\n self.Outputs.selected_documents.send(selected)\n else:\n self.Outputs.selected_documents.send(None)\n\n def send_report(self):\n view = self.conc_view\n model = self.conc_view.model()\n self.report_items(\"Concordances\", (\n (\"Query\", model.word),\n (\"Tokens\", model.n_tokens),\n (\"Types\", model.n_types),\n (\"Matching\", self.n_matching),\n ))\n self.report_table(view)\n\n\nif __name__ == '__main__': # pragma: no cover\n app = QApplication([])\n widget = OWConcordance()\n corpus = Corpus.from_file('book-excerpts')\n corpus = corpus[:3]\n widget.set_corpus(corpus)\n widget.show()\n app.exec()\n\n", "path": "orangecontrib/text/widgets/owconcordance.py"}], "after_files": [{"content": "from typing import Optional\n\nfrom itertools import chain\nfrom AnyQt.QtCore import Qt, QAbstractTableModel, QSize, QItemSelectionModel, \\\n QItemSelection, QModelIndex\nfrom AnyQt.QtWidgets import QSizePolicy, QApplication, QTableView, \\\n QStyledItemDelegate\nfrom AnyQt.QtGui import QColor\n\nfrom Orange.widgets import gui\nfrom Orange.widgets.settings import Setting, ContextSetting, PerfectDomainContextHandler\nfrom Orange.widgets.widget import OWWidget, Msg, Input, Output\nfrom nltk import ConcordanceIndex\nfrom orangecontrib.text.corpus import Corpus\nfrom orangecontrib.text.topics import Topic\nfrom orangecontrib.text.preprocess import WordPunctTokenizer\n\n\nclass HorizontalGridDelegate(QStyledItemDelegate):\n \"\"\"Class for setting elide.\"\"\"\n\n def paint(self, painter, option, index):\n if index.column() == 0:\n option.textElideMode = Qt.ElideLeft\n elif index.column() == 2:\n option.textElideMode = Qt.ElideRight\n QStyledItemDelegate.paint(self, painter, option, index)\n\n\nclass DocumentSelectionModel(QItemSelectionModel):\n \"\"\"Sets selection for QTableView. 
Creates a set of selected documents.\"\"\"\n\n def select(self, selection, flags):\n # which rows have been selected\n indexes = selection.indexes() if isinstance(selection, QItemSelection) \\\n else [selection]\n # prevent crashing when deleting the connection\n if not indexes:\n super().select(selection, flags)\n return\n # indexes[0].row() == -1 indicates clicking outside of the table\n if len(indexes) == 1 and indexes[0].row() == -1:\n self.clear()\n return\n word_index = self.model().word_index\n selected_docs = {word_index[index.row()][0] for index in indexes}\n selected_rows = [\n row_index for row_index, (doc_index, _) in enumerate(word_index)\n if doc_index in selected_docs]\n selection = QItemSelection()\n # select all rows belonging to the selected document\n for row in selected_rows:\n index = self.model().index(row, 0)\n selection.select(index, index)\n super().select(selection, flags)\n\n\nclass ConcordanceModel(QAbstractTableModel):\n \"\"\"A model for constructing concordances from text.\"\"\"\n\n def __init__(self):\n QAbstractTableModel.__init__(self)\n self.word = None\n self.corpus = None\n self.tokens = None\n self.n_tokens = None\n self.n_types = None\n self.indices = None\n self.word_index = None\n self.width = 8\n self.colored_rows = None\n\n def set_word(self, word):\n self.modelAboutToBeReset.emit()\n self.word = word\n self._compute_word_index()\n self.modelReset.emit()\n\n def set_corpus(self, corpus):\n self.modelAboutToBeReset.emit()\n self.corpus = corpus\n self.set_tokens()\n self._compute_indices()\n self._compute_word_index()\n self.modelReset.emit()\n\n def set_tokens(self):\n if self.corpus is None:\n self.tokens = None\n return\n tokenizer = WordPunctTokenizer()\n self.tokens = tokenizer(self.corpus.documents)\n self.n_tokens = sum(map(len, self.tokens))\n self.n_types = len(set(chain.from_iterable(self.tokens)))\n\n def set_width(self, width):\n self.modelAboutToBeReset.emit()\n self.width = width\n self.modelReset.emit()\n\n def flags(self, _):\n return Qt.ItemIsEnabled | Qt.ItemIsSelectable\n\n def rowCount(self, parent=QModelIndex(), *args, **kwargs):\n return 0 if parent.isValid() or self.word_index is None else len(self.word_index)\n\n def columnCount(self, parent=None, *args, **kwargs):\n return 3\n\n def data(self, index, role=Qt.DisplayRole):\n row, col = index.row(), index.column()\n doc, index = self.word_index[row]\n\n if role == Qt.DisplayRole:\n tokens = self.tokens\n if col == 0:\n return ' '.join(tokens[doc][max(index - self.width, 0):index])\n if col == 1:\n return tokens[doc][index]\n if col == 2:\n return ' '.join(tokens[doc][index + 1:index + self.width + 1])\n\n elif role == Qt.TextAlignmentRole:\n return [Qt.AlignRight | Qt.AlignVCenter,\n Qt.AlignCenter,\n Qt.AlignLeft | Qt.AlignVCenter][col]\n\n elif role == Qt.BackgroundRole:\n const = self.word_index[row][0] in self.colored_rows\n return QColor(236 + 19 * const, 243 + 12 * const, 255)\n\n def _compute_indices(self): # type: () -> Optional[None, list]\n if self.corpus is None:\n self.indices = None\n return\n self.indices = [ConcordanceIndex(doc, key=lambda x: x.lower())\n for doc in self.tokens]\n\n def _compute_word_index(self):\n if self.indices is None or self.word is None:\n self.word_index = self.colored_rows = None\n else:\n self.word_index = [\n (doc_idx, offset) for doc_idx, doc in enumerate(self.indices)\n for offset in doc.offsets(self.word)]\n self.colored_rows = set(sorted({d[0] for d in self.word_index})[::2])\n\n def matching_docs(self):\n if self.indices and 
self.word:\n return sum(bool(doc.offsets(self.word)) for doc in self.indices)\n else:\n return 0\n\n\nclass OWConcordance(OWWidget):\n name = \"Concordance\"\n description = \"Display the context of the word.\"\n icon = \"icons/Concordance.svg\"\n priority = 520\n\n class Inputs:\n corpus = Input(\"Corpus\", Corpus)\n query_word = Input(\"Query Word\", Topic)\n\n class Outputs:\n selected_documents = Output(\"Selected Documents\", Corpus)\n\n settingsHandler = PerfectDomainContextHandler(\n match_values = PerfectDomainContextHandler.MATCH_VALUES_ALL\n )\n autocommit = Setting(True)\n context_width = Setting(5)\n word = ContextSetting(\"\", exclude_metas=False)\n selected_rows = Setting([], schema_only=True)\n\n class Warning(OWWidget.Warning):\n multiple_words_on_input = Msg(\"Multiple query words on input. \"\n \"Only the first one is considered!\")\n\n def __init__(self):\n super().__init__()\n\n self.corpus = None # Corpus\n self.n_matching = '' # Info on docs matching the word\n self.n_tokens = '' # Info on tokens\n self.n_types = '' # Info on types (unique tokens)\n self.is_word_on_input = False\n\n # Info attributes\n info_box = gui.widgetBox(self.controlArea, 'Info')\n gui.label(info_box, self, 'Tokens: %(n_tokens)s')\n gui.label(info_box, self, 'Types: %(n_types)s')\n gui.label(info_box, self, 'Matching: %(n_matching)s')\n\n # Width parameter\n gui.spin(self.controlArea, self, 'context_width', 3, 10, box=True,\n label=\"Number of words:\", callback=self.set_width)\n\n gui.rubber(self.controlArea)\n\n # Search\n c_box = gui.widgetBox(self.mainArea, orientation=\"vertical\")\n self.input = gui.lineEdit(\n c_box, self, 'word', orientation=Qt.Horizontal,\n sizePolicy=QSizePolicy(QSizePolicy.MinimumExpanding,\n QSizePolicy.Fixed),\n label='Query:', callback=self.set_word, callbackOnType=True)\n self.input.setFocus()\n\n # Concordances view\n self.conc_view = QTableView()\n self.model = ConcordanceModel()\n self.conc_view.setModel(self.model)\n self.conc_view.setWordWrap(False)\n self.conc_view.setSelectionBehavior(QTableView.SelectRows)\n self.conc_view.setSelectionModel(DocumentSelectionModel(self.model))\n self.conc_view.setItemDelegate(HorizontalGridDelegate())\n self.conc_view.selectionModel().selectionChanged.connect(self.selection_changed)\n self.conc_view.horizontalHeader().hide()\n self.conc_view.setShowGrid(False)\n self.mainArea.layout().addWidget(self.conc_view)\n self.set_width()\n\n # Auto-commit box\n gui.auto_commit(self.controlArea, self, 'autocommit', 'Commit',\n 'Auto commit is on')\n\n def sizeHint(self): # pragma: no cover\n return QSize(600, 400)\n\n def set_width(self):\n sel = self.conc_view.selectionModel().selection()\n self.model.set_width(self.context_width)\n if sel:\n self.conc_view.selectionModel().select(sel,\n QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)\n\n def selection_changed(self):\n selection = self.conc_view.selectionModel().selection()\n self.selected_rows = sorted(set(cell.row() for cell in selection.indexes()))\n self.commit()\n\n def set_selection(self, selection):\n if selection:\n sel = QItemSelection()\n for row in selection:\n index = self.conc_view.model().index(row, 0)\n sel.select(index, index)\n self.conc_view.selectionModel().select(sel,\n QItemSelectionModel.SelectCurrent | QItemSelectionModel.Rows)\n\n @Inputs.corpus\n def set_corpus(self, data=None):\n self.closeContext()\n self.corpus = data\n if data is None: # data removed, clear selection\n self.selected_rows = []\n\n if not self.is_word_on_input:\n self.word = 
\"\"\n self.openContext(self.corpus)\n\n self.model.set_corpus(self.corpus)\n self.set_word()\n\n @Inputs.query_word\n def set_word_from_input(self, topic):\n self.Warning.multiple_words_on_input.clear()\n if self.is_word_on_input: # word changed, clear selection\n self.selected_rows = []\n self.is_word_on_input = topic is not None and len(topic) > 0\n self.input.setEnabled(not self.is_word_on_input)\n if self.is_word_on_input:\n if len(topic) > 1:\n self.Warning.multiple_words_on_input()\n self.word = topic.metas[0, 0]\n self.set_word()\n\n def set_word(self):\n self.selected_rows = []\n self.model.set_word(self.word)\n self.update_widget()\n self.commit()\n\n def handleNewSignals(self):\n self.set_selection(self.selected_rows)\n\n def resize_columns(self):\n col_width = (self.conc_view.width() -\n self.conc_view.columnWidth(1)) / 2 - 12\n self.conc_view.setColumnWidth(0, col_width)\n self.conc_view.setColumnWidth(2, col_width)\n\n def resizeEvent(self, event): # pragma: no cover\n super().resizeEvent(event)\n self.resize_columns()\n\n def update_widget(self):\n self.conc_view.resizeColumnToContents(1)\n self.resize_columns()\n self.conc_view.resizeRowsToContents()\n\n if self.corpus is not None:\n self.n_matching = '{}/{}'.format(\n self.model.matching_docs() if self.word else 0,\n len(self.corpus))\n self.n_tokens = self.model.n_tokens\n self.n_types = self.model.n_types\n else:\n self.n_matching = ''\n self.n_tokens = ''\n self.n_types = ''\n\n def commit(self):\n selected_docs = sorted(set(self.model.word_index[row][0]\n for row in self.selected_rows))\n if selected_docs:\n selected = self.corpus[selected_docs]\n self.Outputs.selected_documents.send(selected)\n else:\n self.Outputs.selected_documents.send(None)\n\n def send_report(self):\n view = self.conc_view\n model = self.conc_view.model()\n self.report_items(\"Concordances\", (\n (\"Query\", model.word),\n (\"Tokens\", model.n_tokens),\n (\"Types\", model.n_types),\n (\"Matching\", self.n_matching),\n ))\n self.report_table(view)\n\n\nif __name__ == '__main__': # pragma: no cover\n app = QApplication([])\n widget = OWConcordance()\n corpus = Corpus.from_file('book-excerpts')\n corpus = corpus[:3]\n widget.set_corpus(corpus)\n widget.show()\n app.exec()\n\n", "path": "orangecontrib/text/widgets/owconcordance.py"}]} | 4,068 | 106 |
gh_patches_debug_34685 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1501 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
✨ enable SQLAlchemy logging
### Thanks for stopping by to let us know something could be better!
**Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]_
**Describe the solution you'd like** _A clear and concise description of what you want to happen._
**Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._
**Additional context** _Add any other context or screenshots about the feature request here._
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opteryx/connectors/sql_connector.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 """
14 The SQL Connector downloads data from remote servers and converts them
15 to pyarrow tables so they can be processed as per any other data source.
16 """
17 import time
18 from decimal import Decimal
19 from typing import Any
20 from typing import Dict
21 from typing import Generator
22 from typing import Tuple
23
24 import pyarrow
25 from orso import DataFrame
26 from orso.schema import ConstantColumn
27 from orso.schema import FlatColumn
28 from orso.schema import RelationSchema
29 from orso.tools import random_string
30 from orso.types import PYTHON_TO_ORSO_MAP
31
32 from opteryx.connectors.base.base_connector import DEFAULT_MORSEL_SIZE
33 from opteryx.connectors.base.base_connector import INITIAL_CHUNK_SIZE
34 from opteryx.connectors.base.base_connector import MIN_CHUNK_SIZE
35 from opteryx.connectors.base.base_connector import BaseConnector
36 from opteryx.connectors.capabilities import PredicatePushable
37 from opteryx.exceptions import MissingDependencyError
38 from opteryx.exceptions import UnmetRequirementError
39 from opteryx.managers.expression import Node
40 from opteryx.managers.expression import NodeType
41 from opteryx.third_party.query_builder import Query
42
43
44 def _handle_operand(operand: Node, parameters: dict) -> Tuple[Any, dict]:
45 if operand.node_type == NodeType.IDENTIFIER:
46 return operand.source_column, parameters
47
48 literal = operand.value
49 if hasattr(literal, "item"):
50 literal = literal.item()
51
52 name = random_string(8)
53 parameters[name] = literal
54 return f":{name}", parameters
55
56
57 class SqlConnector(BaseConnector, PredicatePushable):
58 __mode__ = "Sql"
59
60 PUSHABLE_OPS: Dict[str, bool] = {
61 "Eq": True,
62 "NotEq": True,
63 "Gt": True,
64 "GtEq": True,
65 "Lt": True,
66 "LtEq": True,
67 "Like": True,
68 "NotLike": True,
69 }
70
71 OPS_XLAT: Dict[str, str] = {
72 "Eq": "=",
73 "NotEq": "!=",
74 "Gt": ">",
75 "GtEq": ">=",
76 "Lt": "<",
77 "LtEq": "<=",
78 "Like": "LIKE",
79 "NotLike": "NOT LIKE",
80 "IsTrue": "IS TRUE",
81 "IsNotTrue": "IS NOT TRUE",
82 "IsFalse": "IS FALSE",
83 "IsNotFalse": "IS NOT FALSE",
84 "IsNull": "IS NULL",
85 "IsNotNull": "IS NOT NULL",
86 }
87
88 def __init__(self, *args, connection: str = None, engine=None, **kwargs):
89 BaseConnector.__init__(self, **kwargs)
90 PredicatePushable.__init__(self, **kwargs)
91
92 try:
93 from sqlalchemy import MetaData
94 from sqlalchemy import create_engine
95 from sqlalchemy.pool import NullPool
96 except ImportError as err: # pragma: nocover
97 raise MissingDependencyError(err.name) from err
98
99 if engine is None and connection is None: # pragma: no cover
100 raise UnmetRequirementError(
101 "SQL Connections require either a SQL Alchemy connection string in the 'connection' parameter, or a SQL Alchemy Engine in the 'engine' parameter."
102 )
103
104 # create the SqlAlchemy engine
105 if engine is None:
106 self._engine = create_engine(connection, poolclass=NullPool)
107 else:
108 self._engine = engine
109
110 self.schema = None # type: ignore
111 self.metadata = MetaData()
112
113 def can_push(self, operator: Node, types: set = None) -> bool:
114 if super().can_push(operator, types):
115 return True
116 return operator.condition.node_type == NodeType.UNARY_OPERATOR
117
118 def read_dataset( # type:ignore
119 self,
120 *,
121 columns: list = None,
122 predicates: list = None,
123 chunk_size: int = INITIAL_CHUNK_SIZE, # type:ignore
124 ) -> Generator[pyarrow.Table, None, None]: # type:ignore
125 from sqlalchemy.sql import text
126
127 self.chunk_size = chunk_size
128 result_schema = self.schema
129
130 query_builder = Query().FROM(self.dataset)
131
132 # Update the SQL and the target morsel schema if we've pushed a projection
133 if columns:
134 column_names = [col.name for col in columns]
135 query_builder.add("SELECT", *column_names)
136 result_schema.columns = [ # type:ignore
137 col for col in self.schema.columns if col.name in column_names # type:ignore
138 ]
139 elif self.schema.columns: # type:ignore
140 query_builder.add("SELECT", "*")
141 else:
142 query_builder.add("SELECT", "1")
143 self.schema.columns.append(ConstantColumn(name="1", value=1)) # type:ignore
144
145 # Update SQL if we've pushed predicates
146 parameters: dict = {}
147 for predicate in predicates:
148 if predicate.node_type == NodeType.UNARY_OPERATOR:
149 operand = predicate.centre.current_name
150 operator = self.OPS_XLAT[predicate.value]
151
152 query_builder.WHERE(f"{operand} {operator}")
153 else:
154 left_operand = predicate.left
155 right_operand = predicate.right
156 operator = self.OPS_XLAT[predicate.value]
157
158 left_value, parameters = _handle_operand(left_operand, parameters)
159 right_value, parameters = _handle_operand(right_operand, parameters)
160
161 query_builder.WHERE(f"{left_value} {operator} {right_value}")
162
163 at_least_once = False
164
165 convert_time = 0.0
166
167 with self._engine.connect() as conn:
168 # DEBUG: log ("READ DATASET\n", str(query_builder))
169 # DEBUG: log ("PARAMETERS\n", parameters)
170 # Execution Options allows us to handle datasets larger than memory
171 result = conn.execution_options(stream_results=True, max_row_buffer=10000).execute(
172 text(str(query_builder)), parameters=parameters
173 )
174
175 while True:
176 batch_rows = result.fetchmany(self.chunk_size)
177 if not batch_rows:
178 break
179
180 # convert the SqlAlchemy Results to Arrow using Orso
181 b = time.monotonic_ns()
182 morsel = DataFrame(schema=result_schema, rows=batch_rows).arrow()
183 convert_time += time.monotonic_ns() - b
184 yield morsel
185 at_least_once = True
186
187 # Dynamically adjust chunk size based on the data size, we start by downloading
188 # 500 records to get an idea of the row size, assuming these 500 are
189 # representative, we work out how many rows fit into 16Mb (check setting).
190 # Don't keep recalculating, this is not a cheap operation and it's predicting
191 # the future so isn't going to ever be 100% correct
192 if self.chunk_size == INITIAL_CHUNK_SIZE and morsel.nbytes > 0:
193 self.chunk_size = int(len(morsel) // (morsel.nbytes / DEFAULT_MORSEL_SIZE)) + 1
194 self.chunk_size = (self.chunk_size // MIN_CHUNK_SIZE) * MIN_CHUNK_SIZE
195 self.chunk_size = max(self.chunk_size, MIN_CHUNK_SIZE)
196 # DEBUG: log (f"CHANGING CHUNK SIZE TO {self.chunk_size} was {INITIAL_CHUNK_SIZE}.")
197
198 if not at_least_once:
199 yield DataFrame(schema=result_schema).arrow()
200
201 # DEBUG: log (f"time spent converting: {convert_time/1e9}s")
202
203 def get_dataset_schema(self) -> RelationSchema:
204 from sqlalchemy import Table
205
206 if self.schema:
207 return self.schema
208
209 # Try to read the schema from the metastore
210 self.schema = self.read_schema_from_metastore()
211 if self.schema:
212 return self.schema
213
214 # get the schema from the dataset
215 # DEBUG: log ("GET SQL SCHEMA:", self.dataset)
216 try:
217 table = Table(self.dataset, self.metadata, autoload_with=self._engine)
218
219 self.schema = RelationSchema(
220 name=table.name,
221 columns=[
222 FlatColumn(
223 name=column.name,
224 type=PYTHON_TO_ORSO_MAP[column.type.python_type],
225 precision=(
226 None
227 if column.type.python_type != Decimal
228 else column.type.precision # type:ignore
229 ),
230 scale=(
231 None
232 if column.type.python_type != Decimal
233 else column.type.scale # type:ignore
234 ),
235 nullable=column.nullable,
236 )
237 for column in table.columns
238 ],
239 )
240 except Exception as err:
241 # Fall back to getting the schema from the first row, this is the column names, and where
242 # possible, column types.
243 # DEBUG: log (f"APPROXIMATING SCHEMA OF {self.dataset} BECAUSE OF {type(err).__name__}({err})")
244 from sqlalchemy.sql import text
245
246 with self._engine.connect() as conn:
247 query = Query().SELECT("*").FROM(self.dataset).LIMIT("1")
248 # DEBUG: log ("READ ROW\n", str(query))
249 row = conn.execute(text(str(query))).fetchone()._asdict()
250 # DEBUG: log ("ROW:", row)
251 self.schema = RelationSchema(
252 name=self.dataset,
253 columns=[
254 FlatColumn(
255 name=column,
256 type=0 if value is None else PYTHON_TO_ORSO_MAP[type(value)],
257 )
258 for column, value in row.items()
259 ],
260 )
261
262 return self.schema
263
```
Path: `opteryx/__version__.py`
Content:
```
1 __build__ = 340
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 13
31 _revision = 4
32 _status = VersionStatus.ALPHA
33
34 __author__ = "@joocer"
35 __version__ = f"{_major}.{_minor}.{_revision}" + (
36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
37 )
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 340
+__build__ = 341
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/opteryx/connectors/sql_connector.py b/opteryx/connectors/sql_connector.py
--- a/opteryx/connectors/sql_connector.py
+++ b/opteryx/connectors/sql_connector.py
@@ -14,6 +14,7 @@
The SQL Connector downloads data from remote servers and converts them
to pyarrow tables so they can be processed as per any other data source.
"""
+import os
import time
from decimal import Decimal
from typing import Any
@@ -40,6 +41,8 @@
from opteryx.managers.expression import NodeType
from opteryx.third_party.query_builder import Query
+DEBUG_ENABLED = os.environ.get("OPTERYX_DEBUG") is not None
+
def _handle_operand(operand: Node, parameters: dict) -> Tuple[Any, dict]:
if operand.node_type == NodeType.IDENTIFIER:
@@ -103,7 +106,7 @@
# create the SqlAlchemy engine
if engine is None:
- self._engine = create_engine(connection, poolclass=NullPool)
+ self._engine = create_engine(connection, poolclass=NullPool, echo=DEBUG_ENABLED)
else:
self._engine = engine
@@ -168,7 +171,7 @@
# DEBUG: log ("READ DATASET\n", str(query_builder))
# DEBUG: log ("PARAMETERS\n", parameters)
# Execution Options allows us to handle datasets larger than memory
- result = conn.execution_options(stream_results=True, max_row_buffer=10000).execute(
+ result = conn.execution_options(stream_results=True, max_row_buffer=25000).execute(
text(str(query_builder)), parameters=parameters
)
| {"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 340\n+__build__ = 341\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/connectors/sql_connector.py b/opteryx/connectors/sql_connector.py\n--- a/opteryx/connectors/sql_connector.py\n+++ b/opteryx/connectors/sql_connector.py\n@@ -14,6 +14,7 @@\n The SQL Connector downloads data from remote servers and converts them\n to pyarrow tables so they can be processed as per any other data source.\n \"\"\"\n+import os\n import time\n from decimal import Decimal\n from typing import Any\n@@ -40,6 +41,8 @@\n from opteryx.managers.expression import NodeType\n from opteryx.third_party.query_builder import Query\n \n+DEBUG_ENABLED = os.environ.get(\"OPTERYX_DEBUG\") is not None\n+\n \n def _handle_operand(operand: Node, parameters: dict) -> Tuple[Any, dict]:\n if operand.node_type == NodeType.IDENTIFIER:\n@@ -103,7 +106,7 @@\n \n # create the SqlAlchemy engine\n if engine is None:\n- self._engine = create_engine(connection, poolclass=NullPool)\n+ self._engine = create_engine(connection, poolclass=NullPool, echo=DEBUG_ENABLED)\n else:\n self._engine = engine\n \n@@ -168,7 +171,7 @@\n # DEBUG: log (\"READ DATASET\\n\", str(query_builder))\n # DEBUG: log (\"PARAMETERS\\n\", parameters)\n # Execution Options allows us to handle datasets larger than memory\n- result = conn.execution_options(stream_results=True, max_row_buffer=10000).execute(\n+ result = conn.execution_options(stream_results=True, max_row_buffer=25000).execute(\n text(str(query_builder)), parameters=parameters\n )\n", "issue": "\u2728 enable SQLAlchemy logging\n### Thanks for stopping by to let us know something could be better!\r\n\r\n**Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. 
I'm always frustrated when [...]_\r\n\r\n**Describe the solution you'd like** _A clear and concise description of what you want to happen._\r\n\r\n**Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._\r\n\r\n**Additional context** _Add any other context or screenshots about the feature request here._\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe SQL Connector downloads data from remote servers and converts them\nto pyarrow tables so they can be processed as per any other data source.\n\"\"\"\nimport time\nfrom decimal import Decimal\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Tuple\n\nimport pyarrow\nfrom orso import DataFrame\nfrom orso.schema import ConstantColumn\nfrom orso.schema import FlatColumn\nfrom orso.schema import RelationSchema\nfrom orso.tools import random_string\nfrom orso.types import PYTHON_TO_ORSO_MAP\n\nfrom opteryx.connectors.base.base_connector import DEFAULT_MORSEL_SIZE\nfrom opteryx.connectors.base.base_connector import INITIAL_CHUNK_SIZE\nfrom opteryx.connectors.base.base_connector import MIN_CHUNK_SIZE\nfrom opteryx.connectors.base.base_connector import BaseConnector\nfrom opteryx.connectors.capabilities import PredicatePushable\nfrom opteryx.exceptions import MissingDependencyError\nfrom opteryx.exceptions import UnmetRequirementError\nfrom opteryx.managers.expression import Node\nfrom opteryx.managers.expression import NodeType\nfrom opteryx.third_party.query_builder import Query\n\n\ndef _handle_operand(operand: Node, parameters: dict) -> Tuple[Any, dict]:\n if operand.node_type == NodeType.IDENTIFIER:\n return operand.source_column, parameters\n\n literal = operand.value\n if hasattr(literal, \"item\"):\n literal = literal.item()\n\n name = random_string(8)\n parameters[name] = literal\n return f\":{name}\", parameters\n\n\nclass SqlConnector(BaseConnector, PredicatePushable):\n __mode__ = \"Sql\"\n\n PUSHABLE_OPS: Dict[str, bool] = {\n \"Eq\": True,\n \"NotEq\": True,\n \"Gt\": True,\n \"GtEq\": True,\n \"Lt\": True,\n \"LtEq\": True,\n \"Like\": True,\n \"NotLike\": True,\n }\n\n OPS_XLAT: Dict[str, str] = {\n \"Eq\": \"=\",\n \"NotEq\": \"!=\",\n \"Gt\": \">\",\n \"GtEq\": \">=\",\n \"Lt\": \"<\",\n \"LtEq\": \"<=\",\n \"Like\": \"LIKE\",\n \"NotLike\": \"NOT LIKE\",\n \"IsTrue\": \"IS TRUE\",\n \"IsNotTrue\": \"IS NOT TRUE\",\n \"IsFalse\": \"IS FALSE\",\n \"IsNotFalse\": \"IS NOT FALSE\",\n \"IsNull\": \"IS NULL\",\n \"IsNotNull\": \"IS NOT NULL\",\n }\n\n def __init__(self, *args, connection: str = None, engine=None, **kwargs):\n BaseConnector.__init__(self, **kwargs)\n PredicatePushable.__init__(self, **kwargs)\n\n try:\n from sqlalchemy import MetaData\n from sqlalchemy import create_engine\n from sqlalchemy.pool import NullPool\n except ImportError as err: # pragma: nocover\n raise MissingDependencyError(err.name) from err\n\n if engine is None and connection is 
None: # pragma: no cover\n raise UnmetRequirementError(\n \"SQL Connections require either a SQL Alchemy connection string in the 'connection' parameter, or a SQL Alchemy Engine in the 'engine' parameter.\"\n )\n\n # create the SqlAlchemy engine\n if engine is None:\n self._engine = create_engine(connection, poolclass=NullPool)\n else:\n self._engine = engine\n\n self.schema = None # type: ignore\n self.metadata = MetaData()\n\n def can_push(self, operator: Node, types: set = None) -> bool:\n if super().can_push(operator, types):\n return True\n return operator.condition.node_type == NodeType.UNARY_OPERATOR\n\n def read_dataset( # type:ignore\n self,\n *,\n columns: list = None,\n predicates: list = None,\n chunk_size: int = INITIAL_CHUNK_SIZE, # type:ignore\n ) -> Generator[pyarrow.Table, None, None]: # type:ignore\n from sqlalchemy.sql import text\n\n self.chunk_size = chunk_size\n result_schema = self.schema\n\n query_builder = Query().FROM(self.dataset)\n\n # Update the SQL and the target morsel schema if we've pushed a projection\n if columns:\n column_names = [col.name for col in columns]\n query_builder.add(\"SELECT\", *column_names)\n result_schema.columns = [ # type:ignore\n col for col in self.schema.columns if col.name in column_names # type:ignore\n ]\n elif self.schema.columns: # type:ignore\n query_builder.add(\"SELECT\", \"*\")\n else:\n query_builder.add(\"SELECT\", \"1\")\n self.schema.columns.append(ConstantColumn(name=\"1\", value=1)) # type:ignore\n\n # Update SQL if we've pushed predicates\n parameters: dict = {}\n for predicate in predicates:\n if predicate.node_type == NodeType.UNARY_OPERATOR:\n operand = predicate.centre.current_name\n operator = self.OPS_XLAT[predicate.value]\n\n query_builder.WHERE(f\"{operand} {operator}\")\n else:\n left_operand = predicate.left\n right_operand = predicate.right\n operator = self.OPS_XLAT[predicate.value]\n\n left_value, parameters = _handle_operand(left_operand, parameters)\n right_value, parameters = _handle_operand(right_operand, parameters)\n\n query_builder.WHERE(f\"{left_value} {operator} {right_value}\")\n\n at_least_once = False\n\n convert_time = 0.0\n\n with self._engine.connect() as conn:\n # DEBUG: log (\"READ DATASET\\n\", str(query_builder))\n # DEBUG: log (\"PARAMETERS\\n\", parameters)\n # Execution Options allows us to handle datasets larger than memory\n result = conn.execution_options(stream_results=True, max_row_buffer=10000).execute(\n text(str(query_builder)), parameters=parameters\n )\n\n while True:\n batch_rows = result.fetchmany(self.chunk_size)\n if not batch_rows:\n break\n\n # convert the SqlAlchemy Results to Arrow using Orso\n b = time.monotonic_ns()\n morsel = DataFrame(schema=result_schema, rows=batch_rows).arrow()\n convert_time += time.monotonic_ns() - b\n yield morsel\n at_least_once = True\n\n # Dynamically adjust chunk size based on the data size, we start by downloading\n # 500 records to get an idea of the row size, assuming these 500 are\n # representative, we work out how many rows fit into 16Mb (check setting).\n # Don't keep recalculating, this is not a cheap operation and it's predicting\n # the future so isn't going to ever be 100% correct\n if self.chunk_size == INITIAL_CHUNK_SIZE and morsel.nbytes > 0:\n self.chunk_size = int(len(morsel) // (morsel.nbytes / DEFAULT_MORSEL_SIZE)) + 1\n self.chunk_size = (self.chunk_size // MIN_CHUNK_SIZE) * MIN_CHUNK_SIZE\n self.chunk_size = max(self.chunk_size, MIN_CHUNK_SIZE)\n # DEBUG: log (f\"CHANGING CHUNK SIZE TO {self.chunk_size} was 
{INITIAL_CHUNK_SIZE}.\")\n\n if not at_least_once:\n yield DataFrame(schema=result_schema).arrow()\n\n # DEBUG: log (f\"time spent converting: {convert_time/1e9}s\")\n\n def get_dataset_schema(self) -> RelationSchema:\n from sqlalchemy import Table\n\n if self.schema:\n return self.schema\n\n # Try to read the schema from the metastore\n self.schema = self.read_schema_from_metastore()\n if self.schema:\n return self.schema\n\n # get the schema from the dataset\n # DEBUG: log (\"GET SQL SCHEMA:\", self.dataset)\n try:\n table = Table(self.dataset, self.metadata, autoload_with=self._engine)\n\n self.schema = RelationSchema(\n name=table.name,\n columns=[\n FlatColumn(\n name=column.name,\n type=PYTHON_TO_ORSO_MAP[column.type.python_type],\n precision=(\n None\n if column.type.python_type != Decimal\n else column.type.precision # type:ignore\n ),\n scale=(\n None\n if column.type.python_type != Decimal\n else column.type.scale # type:ignore\n ),\n nullable=column.nullable,\n )\n for column in table.columns\n ],\n )\n except Exception as err:\n # Fall back to getting the schema from the first row, this is the column names, and where\n # possible, column types.\n # DEBUG: log (f\"APPROXIMATING SCHEMA OF {self.dataset} BECAUSE OF {type(err).__name__}({err})\")\n from sqlalchemy.sql import text\n\n with self._engine.connect() as conn:\n query = Query().SELECT(\"*\").FROM(self.dataset).LIMIT(\"1\")\n # DEBUG: log (\"READ ROW\\n\", str(query))\n row = conn.execute(text(str(query))).fetchone()._asdict()\n # DEBUG: log (\"ROW:\", row)\n self.schema = RelationSchema(\n name=self.dataset,\n columns=[\n FlatColumn(\n name=column,\n type=0 if value is None else PYTHON_TO_ORSO_MAP[type(value)],\n )\n for column, value in row.items()\n ],\n )\n\n return self.schema\n", "path": "opteryx/connectors/sql_connector.py"}, {"content": "__build__ = 340\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 13\n_revision = 4\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations 
under the License.\n\n\"\"\"\nThe SQL Connector downloads data from remote servers and converts them\nto pyarrow tables so they can be processed as per any other data source.\n\"\"\"\nimport os\nimport time\nfrom decimal import Decimal\nfrom typing import Any\nfrom typing import Dict\nfrom typing import Generator\nfrom typing import Tuple\n\nimport pyarrow\nfrom orso import DataFrame\nfrom orso.schema import ConstantColumn\nfrom orso.schema import FlatColumn\nfrom orso.schema import RelationSchema\nfrom orso.tools import random_string\nfrom orso.types import PYTHON_TO_ORSO_MAP\n\nfrom opteryx.connectors.base.base_connector import DEFAULT_MORSEL_SIZE\nfrom opteryx.connectors.base.base_connector import INITIAL_CHUNK_SIZE\nfrom opteryx.connectors.base.base_connector import MIN_CHUNK_SIZE\nfrom opteryx.connectors.base.base_connector import BaseConnector\nfrom opteryx.connectors.capabilities import PredicatePushable\nfrom opteryx.exceptions import MissingDependencyError\nfrom opteryx.exceptions import UnmetRequirementError\nfrom opteryx.managers.expression import Node\nfrom opteryx.managers.expression import NodeType\nfrom opteryx.third_party.query_builder import Query\n\nDEBUG_ENABLED = os.environ.get(\"OPTERYX_DEBUG\") is not None\n\n\ndef _handle_operand(operand: Node, parameters: dict) -> Tuple[Any, dict]:\n if operand.node_type == NodeType.IDENTIFIER:\n return operand.source_column, parameters\n\n literal = operand.value\n if hasattr(literal, \"item\"):\n literal = literal.item()\n\n name = random_string(8)\n parameters[name] = literal\n return f\":{name}\", parameters\n\n\nclass SqlConnector(BaseConnector, PredicatePushable):\n __mode__ = \"Sql\"\n\n PUSHABLE_OPS: Dict[str, bool] = {\n \"Eq\": True,\n \"NotEq\": True,\n \"Gt\": True,\n \"GtEq\": True,\n \"Lt\": True,\n \"LtEq\": True,\n \"Like\": True,\n \"NotLike\": True,\n }\n\n OPS_XLAT: Dict[str, str] = {\n \"Eq\": \"=\",\n \"NotEq\": \"!=\",\n \"Gt\": \">\",\n \"GtEq\": \">=\",\n \"Lt\": \"<\",\n \"LtEq\": \"<=\",\n \"Like\": \"LIKE\",\n \"NotLike\": \"NOT LIKE\",\n \"IsTrue\": \"IS TRUE\",\n \"IsNotTrue\": \"IS NOT TRUE\",\n \"IsFalse\": \"IS FALSE\",\n \"IsNotFalse\": \"IS NOT FALSE\",\n \"IsNull\": \"IS NULL\",\n \"IsNotNull\": \"IS NOT NULL\",\n }\n\n def __init__(self, *args, connection: str = None, engine=None, **kwargs):\n BaseConnector.__init__(self, **kwargs)\n PredicatePushable.__init__(self, **kwargs)\n\n try:\n from sqlalchemy import MetaData\n from sqlalchemy import create_engine\n from sqlalchemy.pool import NullPool\n except ImportError as err: # pragma: nocover\n raise MissingDependencyError(err.name) from err\n\n if engine is None and connection is None: # pragma: no cover\n raise UnmetRequirementError(\n \"SQL Connections require either a SQL Alchemy connection string in the 'connection' parameter, or a SQL Alchemy Engine in the 'engine' parameter.\"\n )\n\n # create the SqlAlchemy engine\n if engine is None:\n self._engine = create_engine(connection, poolclass=NullPool, echo=DEBUG_ENABLED)\n else:\n self._engine = engine\n\n self.schema = None # type: ignore\n self.metadata = MetaData()\n\n def can_push(self, operator: Node, types: set = None) -> bool:\n if super().can_push(operator, types):\n return True\n return operator.condition.node_type == NodeType.UNARY_OPERATOR\n\n def read_dataset( # type:ignore\n self,\n *,\n columns: list = None,\n predicates: list = None,\n chunk_size: int = INITIAL_CHUNK_SIZE, # type:ignore\n ) -> Generator[pyarrow.Table, None, None]: # type:ignore\n from sqlalchemy.sql import text\n\n 
self.chunk_size = chunk_size\n result_schema = self.schema\n\n query_builder = Query().FROM(self.dataset)\n\n # Update the SQL and the target morsel schema if we've pushed a projection\n if columns:\n column_names = [col.name for col in columns]\n query_builder.add(\"SELECT\", *column_names)\n result_schema.columns = [ # type:ignore\n col for col in self.schema.columns if col.name in column_names # type:ignore\n ]\n elif self.schema.columns: # type:ignore\n query_builder.add(\"SELECT\", \"*\")\n else:\n query_builder.add(\"SELECT\", \"1\")\n self.schema.columns.append(ConstantColumn(name=\"1\", value=1)) # type:ignore\n\n # Update SQL if we've pushed predicates\n parameters: dict = {}\n for predicate in predicates:\n if predicate.node_type == NodeType.UNARY_OPERATOR:\n operand = predicate.centre.current_name\n operator = self.OPS_XLAT[predicate.value]\n\n query_builder.WHERE(f\"{operand} {operator}\")\n else:\n left_operand = predicate.left\n right_operand = predicate.right\n operator = self.OPS_XLAT[predicate.value]\n\n left_value, parameters = _handle_operand(left_operand, parameters)\n right_value, parameters = _handle_operand(right_operand, parameters)\n\n query_builder.WHERE(f\"{left_value} {operator} {right_value}\")\n\n at_least_once = False\n\n convert_time = 0.0\n\n with self._engine.connect() as conn:\n # DEBUG: log (\"READ DATASET\\n\", str(query_builder))\n # DEBUG: log (\"PARAMETERS\\n\", parameters)\n # Execution Options allows us to handle datasets larger than memory\n result = conn.execution_options(stream_results=True, max_row_buffer=25000).execute(\n text(str(query_builder)), parameters=parameters\n )\n\n while True:\n batch_rows = result.fetchmany(self.chunk_size)\n if not batch_rows:\n break\n\n # convert the SqlAlchemy Results to Arrow using Orso\n b = time.monotonic_ns()\n morsel = DataFrame(schema=result_schema, rows=batch_rows).arrow()\n convert_time += time.monotonic_ns() - b\n yield morsel\n at_least_once = True\n\n # Dynamically adjust chunk size based on the data size, we start by downloading\n # 500 records to get an idea of the row size, assuming these 500 are\n # representative, we work out how many rows fit into 16Mb (check setting).\n # Don't keep recalculating, this is not a cheap operation and it's predicting\n # the future so isn't going to ever be 100% correct\n if self.chunk_size == INITIAL_CHUNK_SIZE and morsel.nbytes > 0:\n self.chunk_size = int(len(morsel) // (morsel.nbytes / DEFAULT_MORSEL_SIZE)) + 1\n self.chunk_size = (self.chunk_size // MIN_CHUNK_SIZE) * MIN_CHUNK_SIZE\n self.chunk_size = max(self.chunk_size, MIN_CHUNK_SIZE)\n # DEBUG: log (f\"CHANGING CHUNK SIZE TO {self.chunk_size} was {INITIAL_CHUNK_SIZE}.\")\n\n if not at_least_once:\n yield DataFrame(schema=result_schema).arrow()\n\n # DEBUG: log (f\"time spent converting: {convert_time/1e9}s\")\n\n def get_dataset_schema(self) -> RelationSchema:\n from sqlalchemy import Table\n\n if self.schema:\n return self.schema\n\n # Try to read the schema from the metastore\n self.schema = self.read_schema_from_metastore()\n if self.schema:\n return self.schema\n\n # get the schema from the dataset\n # DEBUG: log (\"GET SQL SCHEMA:\", self.dataset)\n try:\n table = Table(self.dataset, self.metadata, autoload_with=self._engine)\n\n self.schema = RelationSchema(\n name=table.name,\n columns=[\n FlatColumn(\n name=column.name,\n type=PYTHON_TO_ORSO_MAP[column.type.python_type],\n precision=(\n None\n if column.type.python_type != Decimal\n else column.type.precision # type:ignore\n ),\n scale=(\n None\n 
if column.type.python_type != Decimal\n else column.type.scale # type:ignore\n ),\n nullable=column.nullable,\n )\n for column in table.columns\n ],\n )\n except Exception as err:\n # Fall back to getting the schema from the first row, this is the column names, and where\n # possible, column types.\n # DEBUG: log (f\"APPROXIMATING SCHEMA OF {self.dataset} BECAUSE OF {type(err).__name__}({err})\")\n from sqlalchemy.sql import text\n\n with self._engine.connect() as conn:\n query = Query().SELECT(\"*\").FROM(self.dataset).LIMIT(\"1\")\n # DEBUG: log (\"READ ROW\\n\", str(query))\n row = conn.execute(text(str(query))).fetchone()._asdict()\n # DEBUG: log (\"ROW:\", row)\n self.schema = RelationSchema(\n name=self.dataset,\n columns=[\n FlatColumn(\n name=column,\n type=0 if value is None else PYTHON_TO_ORSO_MAP[type(value)],\n )\n for column, value in row.items()\n ],\n )\n\n return self.schema\n", "path": "opteryx/connectors/sql_connector.py"}, {"content": "__build__ = 341\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 13\n_revision = 4\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]} | 3,609 | 474 |
gh_patches_debug_9502 | rasdani/github-patches | git_diff | redis__redis-py-2112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support CASESENSITIVE tag in Tag Field
link: https://oss.redis.com/redisearch/Commands/#ftcreate
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redis/commands/search/field.py`
Content:
```
1 from typing import List
2
3 from redis import DataError
4
5
6 class Field:
7
8 NUMERIC = "NUMERIC"
9 TEXT = "TEXT"
10 WEIGHT = "WEIGHT"
11 GEO = "GEO"
12 TAG = "TAG"
13 VECTOR = "VECTOR"
14 SORTABLE = "SORTABLE"
15 NOINDEX = "NOINDEX"
16 AS = "AS"
17
18 def __init__(
19 self,
20 name: str,
21 args: List[str] = None,
22 sortable: bool = False,
23 no_index: bool = False,
24 as_name: str = None,
25 ):
26 if args is None:
27 args = []
28 self.name = name
29 self.args = args
30 self.args_suffix = list()
31 self.as_name = as_name
32
33 if sortable:
34 self.args_suffix.append(Field.SORTABLE)
35 if no_index:
36 self.args_suffix.append(Field.NOINDEX)
37
38 if no_index and not sortable:
39 raise ValueError("Non-Sortable non-Indexable fields are ignored")
40
41 def append_arg(self, value):
42 self.args.append(value)
43
44 def redis_args(self):
45 args = [self.name]
46 if self.as_name:
47 args += [self.AS, self.as_name]
48 args += self.args
49 args += self.args_suffix
50 return args
51
52
53 class TextField(Field):
54 """
55 TextField is used to define a text field in a schema definition
56 """
57
58 NOSTEM = "NOSTEM"
59 PHONETIC = "PHONETIC"
60
61 def __init__(
62 self,
63 name: str,
64 weight: float = 1.0,
65 no_stem: bool = False,
66 phonetic_matcher: str = None,
67 **kwargs,
68 ):
69 Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
70
71 if no_stem:
72 Field.append_arg(self, self.NOSTEM)
73 if phonetic_matcher and phonetic_matcher in [
74 "dm:en",
75 "dm:fr",
76 "dm:pt",
77 "dm:es",
78 ]:
79 Field.append_arg(self, self.PHONETIC)
80 Field.append_arg(self, phonetic_matcher)
81
82
83 class NumericField(Field):
84 """
85 NumericField is used to define a numeric field in a schema definition
86 """
87
88 def __init__(self, name: str, **kwargs):
89 Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)
90
91
92 class GeoField(Field):
93 """
94 GeoField is used to define a geo-indexing field in a schema definition
95 """
96
97 def __init__(self, name: str, **kwargs):
98 Field.__init__(self, name, args=[Field.GEO], **kwargs)
99
100
101 class TagField(Field):
102 """
103 TagField is a tag-indexing field with simpler compression and tokenization.
104 See http://redisearch.io/Tags/
105 """
106
107 SEPARATOR = "SEPARATOR"
108
109 def __init__(self, name: str, separator: str = ",", **kwargs):
110 Field.__init__(
111 self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs
112 )
113
114
115 class VectorField(Field):
116 """
117 Allows vector similarity queries against the value in this attribute.
118 See https://oss.redis.com/redisearch/Vectors/#vector_fields.
119 """
120
121 def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
122 """
123 Create Vector Field. Notice that Vector cannot have sortable or no_index tag,
124 although it's also a Field.
125
126 ``name`` is the name of the field.
127
128 ``algorithm`` can be "FLAT" or "HNSW".
129
130 ``attributes`` each algorithm can have specific attributes. Some of them
131 are mandatory and some of them are optional. See
132 https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm
133 for more information.
134 """
135 sort = kwargs.get("sortable", False)
136 noindex = kwargs.get("no_index", False)
137
138 if sort or noindex:
139 raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")
140
141 if algorithm.upper() not in ["FLAT", "HNSW"]:
142 raise DataError(
143 "Realtime vector indexing supporting 2 Indexing Methods:"
144 "'FLAT' and 'HNSW'."
145 )
146
147 attr_li = []
148
149 for key, value in attributes.items():
150 attr_li.extend([key, value])
151
152 Field.__init__(
153 self,
154 name,
155 args=[Field.VECTOR, algorithm, len(attr_li), *attr_li],
156 **kwargs,
157 )
158
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -105,11 +105,20 @@
"""
SEPARATOR = "SEPARATOR"
+ CASESENSITIVE = "CASESENSITIVE"
- def __init__(self, name: str, separator: str = ",", **kwargs):
- Field.__init__(
- self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs
- )
+ def __init__(
+ self,
+ name: str,
+ separator: str = ",",
+ case_sensitive: bool = False,
+ **kwargs,
+ ):
+ args = [Field.TAG, self.SEPARATOR, separator]
+ if case_sensitive:
+ args.append(self.CASESENSITIVE)
+
+ Field.__init__(self, name, args=args, **kwargs)
class VectorField(Field):
| {"golden_diff": "diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py\n--- a/redis/commands/search/field.py\n+++ b/redis/commands/search/field.py\n@@ -105,11 +105,20 @@\n \"\"\"\n \n SEPARATOR = \"SEPARATOR\"\n+ CASESENSITIVE = \"CASESENSITIVE\"\n \n- def __init__(self, name: str, separator: str = \",\", **kwargs):\n- Field.__init__(\n- self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs\n- )\n+ def __init__(\n+ self,\n+ name: str,\n+ separator: str = \",\",\n+ case_sensitive: bool = False,\n+ **kwargs,\n+ ):\n+ args = [Field.TAG, self.SEPARATOR, separator]\n+ if case_sensitive:\n+ args.append(self.CASESENSITIVE)\n+\n+ Field.__init__(self, name, args=args, **kwargs)\n \n \n class VectorField(Field):\n", "issue": "Support CASESENSITIVE tag in Tag Field \nlink: https://oss.redis.com/redisearch/Commands/#ftcreate\n", "before_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n\n def __init__(self, name: str, separator: str = \",\", **kwargs):\n Field.__init__(\n self, name, args=[Field.TAG, self.SEPARATOR, separator], **kwargs\n )\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n 
Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self,\n name,\n args=[Field.VECTOR, algorithm, len(attr_li), *attr_li],\n **kwargs,\n )\n", "path": "redis/commands/search/field.py"}], "after_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n CASESENSITIVE = \"CASESENSITIVE\"\n\n def __init__(\n self,\n name: str,\n separator: str = \",\",\n case_sensitive: bool = False,\n **kwargs,\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n\n 
Field.__init__(self, name, args=args, **kwargs)\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self,\n name,\n args=[Field.VECTOR, algorithm, len(attr_li), *attr_li],\n **kwargs,\n )\n", "path": "redis/commands/search/field.py"}]} | 1,680 | 230 |
gh_patches_debug_61829 | rasdani/github-patches | git_diff | pulp__pulpcore-4010 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
RESTAPI document fix for Upstream Pulp Replication API
**Version**
Pulp installed through the Python modules.
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why are the attributes of **upstream_pulps_create**/**update** mentioned again in the **upstream_pulps_replicate** document? Are those attributes (base_url, api_root, domain,...) actually used when making an API request to "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
--- END ISSUE ---
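
As background for the question above: Pulp generates this documentation with drf-spectacular, and an action only stops advertising a request body once its `extend_schema` declares `request=None`; without that, the schema typically falls back to the viewset's serializer, which is why the UpstreamPulp fields (base_url, api_root, domain, ...) reappear under the replicate operation even though the call itself sends no body. A minimal, hypothetical sketch of the annotation (the viewset and response here are illustrative placeholders, not Pulp code):

```python
from drf_spectacular.utils import extend_schema
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response


class ExampleViewSet(viewsets.ViewSet):
    """Illustrative stand-in for an async-action viewset."""

    @extend_schema(summary="Replicate", request=None)  # request=None: no request body in the OpenAPI schema
    @action(detail=True, methods=["post"])
    def replicate(self, request, pk=None):
        # A real implementation would dispatch a task and return its href.
        return Response(status=202)
```

The patch later in this record applies exactly this `request=None` annotation to the replicate action.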
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/viewsets/replica.py`
Content:
```
1 """
2 ViewSet for replicating repositories and distributions from an upstream Pulp
3 """
4 from django.conf import settings
5 from drf_spectacular.utils import extend_schema
6 from rest_framework import mixins
7 from rest_framework.decorators import action
8
9 from pulpcore.app.models import TaskGroup, UpstreamPulp
10 from pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer
11 from pulpcore.app.viewsets import NamedModelViewSet
12 from pulpcore.app.response import TaskGroupOperationResponse
13 from pulpcore.app.tasks import replicate_distributions
14 from pulpcore.tasking.tasks import dispatch
15
16
17 class UpstreamPulpViewSet(
18 NamedModelViewSet,
19 mixins.CreateModelMixin,
20 mixins.RetrieveModelMixin,
21 mixins.ListModelMixin,
22 mixins.DestroyModelMixin,
23 mixins.UpdateModelMixin,
24 ):
25 """API for configuring an upstream Pulp to replicate. This API is provided as a tech preview."""
26
27 queryset = UpstreamPulp.objects.all()
28 endpoint_name = "upstream-pulps"
29 serializer_class = UpstreamPulpSerializer
30 ordering = "-pulp_created"
31
32 @extend_schema(
33 summary="Replicate",
34 description="Trigger an asynchronous repository replication task group. This API is "
35 "provided as a tech preview.",
36 responses={202: AsyncOperationResponseSerializer},
37 )
38 @action(detail=True, methods=["post"])
39 def replicate(self, request, pk):
40 """
41 Triggers an asynchronous repository replication operation.
42 """
43 server = UpstreamPulp.objects.get(pk=pk)
44 task_group = TaskGroup.objects.create(description=f"Replication of {server.name}")
45
46 uri = "/api/v3/servers/"
47 if settings.DOMAIN_ENABLED:
48 uri = f"/{request.domain.name}{uri}"
49
50 dispatch(
51 replicate_distributions,
52 exclusive_resources=[uri],
53 kwargs={"server_pk": pk},
54 task_group=task_group,
55 )
56
57 return TaskGroupOperationResponse(task_group, request)
58
```
--- END FILES ---
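
For reference, the replicate action shown above is triggered with a bare POST — the UpstreamPulp fields are configured on the object beforehand and are not part of this request. A minimal client-side sketch (host, credentials, and object id are placeholders):

```python
import requests

BASE = "https://pulp.example.com"      # placeholder Pulp host
OBJECT_ID = "<upstream-pulp-uuid>"     # placeholder UpstreamPulp id

resp = requests.post(
    f"{BASE}/pulp/api/v3/upstream_pulps/{OBJECT_ID}/replicate/",
    auth=("admin", "password"),        # placeholder credentials
)
resp.raise_for_status()
print(resp.json())  # expected to reference the spawned task group
```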
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
| {"golden_diff": "diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py\n--- a/pulpcore/app/viewsets/replica.py\n+++ b/pulpcore/app/viewsets/replica.py\n@@ -33,6 +33,7 @@\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n+ request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n", "issue": "RESTAPI document fix for Upstream Pulp Replication API\n**Version**\r\nPulp installed through the Python modules.\r\n\"core:3.28.0\"\r\n\"certguard:3.28.0\"\r\n\"file:3.28.0\"\r\n\"python:3.28.0\"\r\n\"rpm:3.28.0\"\r\n\r\n**Describe the bug**\r\nWhy the attributes of **upstream_pulps_create**/**update** is mentioned again in the **upstream_pulps_replicate\" document? Are those attributes (base_url, api_root, domain,...) used at time making an API request \"https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/\"?\r\n\r\n**To Reproduce**\r\nNone.\r\n\r\n**Expected behavior**\r\nA fix is required in the REST API document.\r\n\r\n**Additional context**\r\nCreate Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create\r\nUpstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. 
This API is \"\n \"provided as a tech preview.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/replica.py"}], "after_files": [{"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n", "path": "pulpcore/app/viewsets/replica.py"}]} | 1,049 | 122 |
gh_patches_debug_44395 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2477 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mkdocs 1.2.x livereload no longer working
mkdocs==1.2 and latest no longer works
mkdocs==1.1.2 works
MacOS 11.4
Docker version 20.10.7, build f0df350
python:3.8-alpine (3.9.5 and 10-rc same behavior)
I checked the Docker container and the Markdown files are updating but the generated HTML is not. This seems to be associated with the 1.2 change to the file watcher.
LMK what logs/config/diag I can provide to help RCA the issue.
--- END ISSUE ---
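
A note on the likely root cause (an inference from the report, not something it states): mkdocs 1.2 moved live-reload change detection onto watchdog's native OS event observers, and filesystem events from a macOS host generally do not propagate into a Linux container across a Docker bind mount, so the new watcher never fires even though the Markdown files change. The upstream fix shown later in this record switches to watchdog's polling observer, which works in that setup because it compares directory snapshots instead of waiting for OS events. A minimal sketch of that approach (the watched directory and the rebuild action are placeholders):

```python
import time

import watchdog.events
from watchdog.observers.polling import PollingObserver


class RebuildHandler(watchdog.events.FileSystemEventHandler):
    def on_any_event(self, event):
        if event.is_directory:
            return
        print(f"change detected: {event.src_path}")  # a real server would rebuild the site here


observer = PollingObserver(timeout=0.5)  # check for changes every 0.5 s; no OS events required
observer.schedule(RebuildHandler(), "docs", recursive=True)
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    pass
finally:
    observer.stop()
    observer.join()
```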
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/livereload/__init__.py`
Content:
```
1 import functools
2 import io
3 import logging
4 import mimetypes
5 import os
6 import os.path
7 import pathlib
8 import re
9 import socketserver
10 import threading
11 import time
12 import warnings
13 import wsgiref.simple_server
14
15 import watchdog.events
16 import watchdog.observers
17
18
19 class _LoggerAdapter(logging.LoggerAdapter):
20 def process(self, msg, kwargs):
21 return time.strftime("[%H:%M:%S] ") + msg, kwargs
22
23
24 log = _LoggerAdapter(logging.getLogger(__name__), {})
25
26
27 class LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
28 daemon_threads = True
29 poll_response_timeout = 60
30
31 def __init__(
32 self,
33 builder,
34 host,
35 port,
36 root,
37 mount_path="/",
38 build_delay=0.25,
39 shutdown_delay=0.25,
40 **kwargs,
41 ):
42 self.builder = builder
43 self.server_name = host
44 self.server_port = port
45 self.root = os.path.abspath(root)
46 self.mount_path = ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
47 self.url = f"http://{self.server_name}:{self.server_port}{self.mount_path}"
48 self.build_delay = build_delay
49 self.shutdown_delay = shutdown_delay
50 # To allow custom error pages.
51 self.error_handler = lambda code: None
52
53 super().__init__((host, port), _Handler, **kwargs)
54 self.set_app(self.serve_request)
55
56 self._wanted_epoch = _timestamp() # The version of the site that started building.
57 self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.
58 self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.
59
60 self._to_rebuild = {} # Used as an ordered set of functions to call.
61 self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.
62
63 self._shutdown = False
64 self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
65 self.observer = watchdog.observers.Observer(timeout=shutdown_delay)
66
67 def watch(self, path, func=None, recursive=True):
68 """Add the 'path' to watched paths, call the function and reload when any file changes under it."""
69 path = os.path.abspath(path)
70 if func in (None, self.builder):
71 func = self.builder
72 else:
73 warnings.warn(
74 "Plugins should not pass the 'func' parameter of watch(). "
75 "The ability to execute custom callbacks will be removed soon.",
76 DeprecationWarning,
77 stacklevel=2,
78 )
79
80 def callback(event, allowed_path=None):
81 if isinstance(event, watchdog.events.DirCreatedEvent):
82 return
83 if allowed_path is not None and event.src_path != allowed_path:
84 return
85 # Text editors always cause a "file close" event in addition to "modified" when saving
86 # a file. Some editors also have "swap" functionality that keeps writing into another
87 # file that's never closed. Prevent such write events from causing a rebuild.
88 if isinstance(event, watchdog.events.FileModifiedEvent):
89 # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:
90 if type(self.observer).__name__ == "InotifyObserver":
91 return
92 log.debug(str(event))
93 with self._rebuild_cond:
94 self._to_rebuild[func] = True
95 self._rebuild_cond.notify_all()
96
97 dir_handler = watchdog.events.FileSystemEventHandler()
98 dir_handler.on_any_event = callback
99
100 seen = set()
101
102 def schedule(path):
103 seen.add(path)
104 if path.is_file():
105 # Watchdog doesn't support watching files, so watch its directory and filter by path
106 handler = watchdog.events.FileSystemEventHandler()
107 handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))
108
109 parent = path.parent
110 log.debug(f"Watching file '{path}' through directory '{parent}'")
111 self.observer.schedule(handler, parent)
112 else:
113 log.debug(f"Watching directory '{path}'")
114 self.observer.schedule(dir_handler, path, recursive=recursive)
115
116 schedule(pathlib.Path(path).resolve())
117
118 def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path
119 if path_obj.is_symlink():
120 path_obj = pathlib.Path(path_obj).resolve()
121 if path_obj in seen or not path_obj.exists():
122 return
123 schedule(path_obj)
124
125 if path_obj.is_dir() and recursive:
126 with os.scandir(os.fspath(path_obj)) as scan:
127 for entry in scan:
128 watch_symlink_targets(entry)
129
130 watch_symlink_targets(pathlib.Path(path))
131
132 def serve(self):
133 self.observer.start()
134
135 log.info(f"Serving on {self.url}")
136 self.serve_thread.start()
137
138 self._build_loop()
139
140 def _build_loop(self):
141 while True:
142 with self._rebuild_cond:
143 while not self._rebuild_cond.wait_for(
144 lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay
145 ):
146 # We could have used just one wait instead of a loop + timeout, but we need
147 # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.
148 pass
149 if self._shutdown:
150 break
151 log.info("Detected file changes")
152 while self._rebuild_cond.wait(timeout=self.build_delay):
153 log.debug("Waiting for file changes to stop happening")
154
155 self._wanted_epoch = _timestamp()
156 funcs = list(self._to_rebuild)
157 self._to_rebuild.clear()
158
159 for func in funcs:
160 func()
161
162 with self._epoch_cond:
163 log.info("Reloading browsers")
164 self._visible_epoch = self._wanted_epoch
165 self._epoch_cond.notify_all()
166
167 def shutdown(self):
168 self.observer.stop()
169 with self._rebuild_cond:
170 self._shutdown = True
171 self._rebuild_cond.notify_all()
172
173 if self.serve_thread.is_alive():
174 super().shutdown()
175 self.serve_thread.join()
176 self.observer.join()
177
178 def serve_request(self, environ, start_response):
179 try:
180 result = self._serve_request(environ, start_response)
181 except Exception:
182 code = 500
183 msg = "500 Internal Server Error"
184 log.exception(msg)
185 else:
186 if result is not None:
187 return result
188 code = 404
189 msg = "404 Not Found"
190
191 error_content = None
192 try:
193 error_content = self.error_handler(code)
194 except Exception:
195 log.exception("Failed to render an error message!")
196 if error_content is None:
197 error_content = msg.encode()
198
199 start_response(msg, [("Content-Type", "text/html")])
200 return [error_content]
201
202 def _serve_request(self, environ, start_response):
203 path = environ["PATH_INFO"]
204
205 m = re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", path)
206 if m:
207 epoch = int(m[1])
208 start_response("200 OK", [("Content-Type", "text/plain")])
209
210 def condition():
211 return self._visible_epoch > epoch
212
213 with self._epoch_cond:
214 if not condition():
215 # Stall the browser, respond as soon as there's something new.
216 # If there's not, respond anyway after a minute.
217 self._log_poll_request(environ.get("HTTP_REFERER"), request_id=path)
218 self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)
219 return [b"%d" % self._visible_epoch]
220
221 if path == "/js/livereload.js":
222 file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "livereload.js")
223 elif path.startswith(self.mount_path):
224 if path.endswith("/"):
225 path += "index.html"
226 path = path[len(self.mount_path):]
227 file_path = os.path.join(self.root, path.lstrip("/"))
228 elif path == "/":
229 start_response("302 Found", [("Location", self.mount_path)])
230 return []
231 else:
232 return None # Not found
233
234 # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.
235 with self._epoch_cond:
236 self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)
237 epoch = self._visible_epoch
238
239 try:
240 file = open(file_path, "rb")
241 except OSError:
242 return None # Not found
243
244 if path.endswith(".html"):
245 with file:
246 content = file.read()
247 content = self._inject_js_into_html(content, epoch)
248 file = io.BytesIO(content)
249 content_length = len(content)
250 else:
251 content_length = os.path.getsize(file_path)
252
253 content_type = self._guess_type(file_path)
254 start_response(
255 "200 OK", [("Content-Type", content_type), ("Content-Length", str(content_length))]
256 )
257 return wsgiref.util.FileWrapper(file)
258
259 @classmethod
260 def _inject_js_into_html(cls, content, epoch):
261 try:
262 body_end = content.rindex(b"</body>")
263 except ValueError:
264 body_end = len(content)
265 # The page will reload if the livereload poller returns a newer epoch than what it knows.
266 # The other timestamp becomes just a unique identifier for the initiating page.
267 return (
268 b'%b<script src="/js/livereload.js"></script><script>livereload(%d, %d);</script>%b'
269 % (content[:body_end], epoch, _timestamp(), content[body_end:])
270 )
271
272 @classmethod
273 @functools.lru_cache() # "Cache" to not repeat the same message for the same browser tab.
274 def _log_poll_request(cls, url, request_id):
275 log.info(f"Browser connected: {url}")
276
277 def _guess_type(cls, path):
278 # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).
279 # Other uncommon types will not be accepted.
280 if path.endswith((".js", ".JS")):
281 return "application/javascript"
282 if path.endswith(".gz"):
283 return "application/gzip"
284
285 guess, _ = mimetypes.guess_type(path)
286 if guess:
287 return guess
288 return "application/octet-stream"
289
290
291 class _Handler(wsgiref.simple_server.WSGIRequestHandler):
292 def log_request(self, code="-", size="-"):
293 level = logging.DEBUG if str(code) == "200" else logging.WARNING
294 log.log(level, f'"{self.requestline}" code {code}')
295
296 def log_message(self, format, *args):
297 log.debug(format, *args)
298
299
300 def _timestamp():
301 return round(time.monotonic() * 1000)
302
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py
--- a/mkdocs/livereload/__init__.py
+++ b/mkdocs/livereload/__init__.py
@@ -4,7 +4,6 @@
import mimetypes
import os
import os.path
-import pathlib
import re
import socketserver
import threading
@@ -13,7 +12,7 @@
import wsgiref.simple_server
import watchdog.events
-import watchdog.observers
+import watchdog.observers.polling
class _LoggerAdapter(logging.LoggerAdapter):
@@ -35,7 +34,7 @@
port,
root,
mount_path="/",
- build_delay=0.25,
+ polling_interval=0.5,
shutdown_delay=0.25,
**kwargs,
):
@@ -45,7 +44,7 @@
self.root = os.path.abspath(root)
self.mount_path = ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
self.url = f"http://{self.server_name}:{self.server_port}{self.mount_path}"
- self.build_delay = build_delay
+ self.build_delay = 0.1
self.shutdown_delay = shutdown_delay
# To allow custom error pages.
self.error_handler = lambda code: None
@@ -62,7 +61,7 @@
self._shutdown = False
self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
- self.observer = watchdog.observers.Observer(timeout=shutdown_delay)
+ self.observer = watchdog.observers.polling.PollingObserver(timeout=polling_interval)
def watch(self, path, func=None, recursive=True):
"""Add the 'path' to watched paths, call the function and reload when any file changes under it."""
@@ -77,57 +76,18 @@
stacklevel=2,
)
- def callback(event, allowed_path=None):
- if isinstance(event, watchdog.events.DirCreatedEvent):
+ def callback(event):
+ if event.is_directory:
return
- if allowed_path is not None and event.src_path != allowed_path:
- return
- # Text editors always cause a "file close" event in addition to "modified" when saving
- # a file. Some editors also have "swap" functionality that keeps writing into another
- # file that's never closed. Prevent such write events from causing a rebuild.
- if isinstance(event, watchdog.events.FileModifiedEvent):
- # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:
- if type(self.observer).__name__ == "InotifyObserver":
- return
log.debug(str(event))
with self._rebuild_cond:
self._to_rebuild[func] = True
self._rebuild_cond.notify_all()
- dir_handler = watchdog.events.FileSystemEventHandler()
- dir_handler.on_any_event = callback
-
- seen = set()
-
- def schedule(path):
- seen.add(path)
- if path.is_file():
- # Watchdog doesn't support watching files, so watch its directory and filter by path
- handler = watchdog.events.FileSystemEventHandler()
- handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))
-
- parent = path.parent
- log.debug(f"Watching file '{path}' through directory '{parent}'")
- self.observer.schedule(handler, parent)
- else:
- log.debug(f"Watching directory '{path}'")
- self.observer.schedule(dir_handler, path, recursive=recursive)
-
- schedule(pathlib.Path(path).resolve())
-
- def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path
- if path_obj.is_symlink():
- path_obj = pathlib.Path(path_obj).resolve()
- if path_obj in seen or not path_obj.exists():
- return
- schedule(path_obj)
-
- if path_obj.is_dir() and recursive:
- with os.scandir(os.fspath(path_obj)) as scan:
- for entry in scan:
- watch_symlink_targets(entry)
-
- watch_symlink_targets(pathlib.Path(path))
+ handler = watchdog.events.FileSystemEventHandler()
+ handler.on_any_event = callback
+ log.debug(f"Watching '{path}'")
+ self.observer.schedule(handler, path, recursive=recursive)
def serve(self):
self.observer.start()
| {"golden_diff": "diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py\n--- a/mkdocs/livereload/__init__.py\n+++ b/mkdocs/livereload/__init__.py\n@@ -4,7 +4,6 @@\n import mimetypes\n import os\n import os.path\n-import pathlib\n import re\n import socketserver\n import threading\n@@ -13,7 +12,7 @@\n import wsgiref.simple_server\n \n import watchdog.events\n-import watchdog.observers\n+import watchdog.observers.polling\n \n \n class _LoggerAdapter(logging.LoggerAdapter):\n@@ -35,7 +34,7 @@\n port,\n root,\n mount_path=\"/\",\n- build_delay=0.25,\n+ polling_interval=0.5,\n shutdown_delay=0.25,\n **kwargs,\n ):\n@@ -45,7 +44,7 @@\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n- self.build_delay = build_delay\n+ self.build_delay = 0.1\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n@@ -62,7 +61,7 @@\n \n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n- self.observer = watchdog.observers.Observer(timeout=shutdown_delay)\n+ self.observer = watchdog.observers.polling.PollingObserver(timeout=polling_interval)\n \n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n@@ -77,57 +76,18 @@\n stacklevel=2,\n )\n \n- def callback(event, allowed_path=None):\n- if isinstance(event, watchdog.events.DirCreatedEvent):\n+ def callback(event):\n+ if event.is_directory:\n return\n- if allowed_path is not None and event.src_path != allowed_path:\n- return\n- # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n- # a file. Some editors also have \"swap\" functionality that keeps writing into another\n- # file that's never closed. 
Prevent such write events from causing a rebuild.\n- if isinstance(event, watchdog.events.FileModifiedEvent):\n- # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:\n- if type(self.observer).__name__ == \"InotifyObserver\":\n- return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n \n- dir_handler = watchdog.events.FileSystemEventHandler()\n- dir_handler.on_any_event = callback\n-\n- seen = set()\n-\n- def schedule(path):\n- seen.add(path)\n- if path.is_file():\n- # Watchdog doesn't support watching files, so watch its directory and filter by path\n- handler = watchdog.events.FileSystemEventHandler()\n- handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))\n-\n- parent = path.parent\n- log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n- self.observer.schedule(handler, parent)\n- else:\n- log.debug(f\"Watching directory '{path}'\")\n- self.observer.schedule(dir_handler, path, recursive=recursive)\n-\n- schedule(pathlib.Path(path).resolve())\n-\n- def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n- if path_obj.is_symlink():\n- path_obj = pathlib.Path(path_obj).resolve()\n- if path_obj in seen or not path_obj.exists():\n- return\n- schedule(path_obj)\n-\n- if path_obj.is_dir() and recursive:\n- with os.scandir(os.fspath(path_obj)) as scan:\n- for entry in scan:\n- watch_symlink_targets(entry)\n-\n- watch_symlink_targets(pathlib.Path(path))\n+ handler = watchdog.events.FileSystemEventHandler()\n+ handler.on_any_event = callback\n+ log.debug(f\"Watching '{path}'\")\n+ self.observer.schedule(handler, path, recursive=recursive)\n \n def serve(self):\n self.observer.start()\n", "issue": "mkdocs 1.2.x livereload no longer working\nmkdocs==1.2 and latest no longer works\r\nmkdocs==1.1.2 works\r\n\r\nMacOS 11.4\r\nDocker version 20.10.7, build f0df350\r\npython:3.8-alpine (3.9.5 and 10-rc same behavior)\r\n\r\nI checked the Docker container and the markdown is updating but the generated html is not. 
This seems to be associated with the 1.2 change with watcher.\r\n\r\nLMK what logs/config/diag I can provide to help RCA the issue.\n", "before_files": [{"content": "import functools\nimport io\nimport logging\nimport mimetypes\nimport os\nimport os.path\nimport pathlib\nimport re\nimport socketserver\nimport threading\nimport time\nimport warnings\nimport wsgiref.simple_server\n\nimport watchdog.events\nimport watchdog.observers\n\n\nclass _LoggerAdapter(logging.LoggerAdapter):\n def process(self, msg, kwargs):\n return time.strftime(\"[%H:%M:%S] \") + msg, kwargs\n\n\nlog = _LoggerAdapter(logging.getLogger(__name__), {})\n\n\nclass LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):\n daemon_threads = True\n poll_response_timeout = 60\n\n def __init__(\n self,\n builder,\n host,\n port,\n root,\n mount_path=\"/\",\n build_delay=0.25,\n shutdown_delay=0.25,\n **kwargs,\n ):\n self.builder = builder\n self.server_name = host\n self.server_port = port\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n self.build_delay = build_delay\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n\n super().__init__((host, port), _Handler, **kwargs)\n self.set_app(self.serve_request)\n\n self._wanted_epoch = _timestamp() # The version of the site that started building.\n self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.\n self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.\n\n self._to_rebuild = {} # Used as an ordered set of functions to call.\n self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.\n\n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n self.observer = watchdog.observers.Observer(timeout=shutdown_delay)\n\n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n path = os.path.abspath(path)\n if func in (None, self.builder):\n func = self.builder\n else:\n warnings.warn(\n \"Plugins should not pass the 'func' parameter of watch(). \"\n \"The ability to execute custom callbacks will be removed soon.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n def callback(event, allowed_path=None):\n if isinstance(event, watchdog.events.DirCreatedEvent):\n return\n if allowed_path is not None and event.src_path != allowed_path:\n return\n # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n # a file. Some editors also have \"swap\" functionality that keeps writing into another\n # file that's never closed. 
Prevent such write events from causing a rebuild.\n if isinstance(event, watchdog.events.FileModifiedEvent):\n # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:\n if type(self.observer).__name__ == \"InotifyObserver\":\n return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n\n dir_handler = watchdog.events.FileSystemEventHandler()\n dir_handler.on_any_event = callback\n\n seen = set()\n\n def schedule(path):\n seen.add(path)\n if path.is_file():\n # Watchdog doesn't support watching files, so watch its directory and filter by path\n handler = watchdog.events.FileSystemEventHandler()\n handler.on_any_event = lambda event: callback(event, allowed_path=os.fspath(path))\n\n parent = path.parent\n log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n self.observer.schedule(handler, parent)\n else:\n log.debug(f\"Watching directory '{path}'\")\n self.observer.schedule(dir_handler, path, recursive=recursive)\n\n schedule(pathlib.Path(path).resolve())\n\n def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n if path_obj.is_symlink():\n path_obj = pathlib.Path(path_obj).resolve()\n if path_obj in seen or not path_obj.exists():\n return\n schedule(path_obj)\n\n if path_obj.is_dir() and recursive:\n with os.scandir(os.fspath(path_obj)) as scan:\n for entry in scan:\n watch_symlink_targets(entry)\n\n watch_symlink_targets(pathlib.Path(path))\n\n def serve(self):\n self.observer.start()\n\n log.info(f\"Serving on {self.url}\")\n self.serve_thread.start()\n\n self._build_loop()\n\n def _build_loop(self):\n while True:\n with self._rebuild_cond:\n while not self._rebuild_cond.wait_for(\n lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay\n ):\n # We could have used just one wait instead of a loop + timeout, but we need\n # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.\n pass\n if self._shutdown:\n break\n log.info(\"Detected file changes\")\n while self._rebuild_cond.wait(timeout=self.build_delay):\n log.debug(\"Waiting for file changes to stop happening\")\n\n self._wanted_epoch = _timestamp()\n funcs = list(self._to_rebuild)\n self._to_rebuild.clear()\n\n for func in funcs:\n func()\n\n with self._epoch_cond:\n log.info(\"Reloading browsers\")\n self._visible_epoch = self._wanted_epoch\n self._epoch_cond.notify_all()\n\n def shutdown(self):\n self.observer.stop()\n with self._rebuild_cond:\n self._shutdown = True\n self._rebuild_cond.notify_all()\n\n if self.serve_thread.is_alive():\n super().shutdown()\n self.serve_thread.join()\n self.observer.join()\n\n def serve_request(self, environ, start_response):\n try:\n result = self._serve_request(environ, start_response)\n except Exception:\n code = 500\n msg = \"500 Internal Server Error\"\n log.exception(msg)\n else:\n if result is not None:\n return result\n code = 404\n msg = \"404 Not Found\"\n\n error_content = None\n try:\n error_content = self.error_handler(code)\n except Exception:\n log.exception(\"Failed to render an error message!\")\n if error_content is None:\n error_content = msg.encode()\n\n start_response(msg, [(\"Content-Type\", \"text/html\")])\n return [error_content]\n\n def _serve_request(self, environ, start_response):\n path = environ[\"PATH_INFO\"]\n\n m = re.fullmatch(r\"/livereload/([0-9]+)/[0-9]+\", path)\n if m:\n epoch = int(m[1])\n start_response(\"200 OK\", [(\"Content-Type\", \"text/plain\")])\n\n def condition():\n return 
self._visible_epoch > epoch\n\n with self._epoch_cond:\n if not condition():\n # Stall the browser, respond as soon as there's something new.\n # If there's not, respond anyway after a minute.\n self._log_poll_request(environ.get(\"HTTP_REFERER\"), request_id=path)\n self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)\n return [b\"%d\" % self._visible_epoch]\n\n if path == \"/js/livereload.js\":\n file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"livereload.js\")\n elif path.startswith(self.mount_path):\n if path.endswith(\"/\"):\n path += \"index.html\"\n path = path[len(self.mount_path):]\n file_path = os.path.join(self.root, path.lstrip(\"/\"))\n elif path == \"/\":\n start_response(\"302 Found\", [(\"Location\", self.mount_path)])\n return []\n else:\n return None # Not found\n\n # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.\n with self._epoch_cond:\n self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)\n epoch = self._visible_epoch\n\n try:\n file = open(file_path, \"rb\")\n except OSError:\n return None # Not found\n\n if path.endswith(\".html\"):\n with file:\n content = file.read()\n content = self._inject_js_into_html(content, epoch)\n file = io.BytesIO(content)\n content_length = len(content)\n else:\n content_length = os.path.getsize(file_path)\n\n content_type = self._guess_type(file_path)\n start_response(\n \"200 OK\", [(\"Content-Type\", content_type), (\"Content-Length\", str(content_length))]\n )\n return wsgiref.util.FileWrapper(file)\n\n @classmethod\n def _inject_js_into_html(cls, content, epoch):\n try:\n body_end = content.rindex(b\"</body>\")\n except ValueError:\n body_end = len(content)\n # The page will reload if the livereload poller returns a newer epoch than what it knows.\n # The other timestamp becomes just a unique identifier for the initiating page.\n return (\n b'%b<script src=\"/js/livereload.js\"></script><script>livereload(%d, %d);</script>%b'\n % (content[:body_end], epoch, _timestamp(), content[body_end:])\n )\n\n @classmethod\n @functools.lru_cache() # \"Cache\" to not repeat the same message for the same browser tab.\n def _log_poll_request(cls, url, request_id):\n log.info(f\"Browser connected: {url}\")\n\n def _guess_type(cls, path):\n # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).\n # Other uncommon types will not be accepted.\n if path.endswith((\".js\", \".JS\")):\n return \"application/javascript\"\n if path.endswith(\".gz\"):\n return \"application/gzip\"\n\n guess, _ = mimetypes.guess_type(path)\n if guess:\n return guess\n return \"application/octet-stream\"\n\n\nclass _Handler(wsgiref.simple_server.WSGIRequestHandler):\n def log_request(self, code=\"-\", size=\"-\"):\n level = logging.DEBUG if str(code) == \"200\" else logging.WARNING\n log.log(level, f'\"{self.requestline}\" code {code}')\n\n def log_message(self, format, *args):\n log.debug(format, *args)\n\n\ndef _timestamp():\n return round(time.monotonic() * 1000)\n", "path": "mkdocs/livereload/__init__.py"}], "after_files": [{"content": "import functools\nimport io\nimport logging\nimport mimetypes\nimport os\nimport os.path\nimport re\nimport socketserver\nimport threading\nimport time\nimport warnings\nimport wsgiref.simple_server\n\nimport watchdog.events\nimport watchdog.observers.polling\n\n\nclass _LoggerAdapter(logging.LoggerAdapter):\n def process(self, msg, kwargs):\n return time.strftime(\"[%H:%M:%S] \") + msg, 
kwargs\n\n\nlog = _LoggerAdapter(logging.getLogger(__name__), {})\n\n\nclass LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):\n daemon_threads = True\n poll_response_timeout = 60\n\n def __init__(\n self,\n builder,\n host,\n port,\n root,\n mount_path=\"/\",\n polling_interval=0.5,\n shutdown_delay=0.25,\n **kwargs,\n ):\n self.builder = builder\n self.server_name = host\n self.server_port = port\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n self.build_delay = 0.1\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n\n super().__init__((host, port), _Handler, **kwargs)\n self.set_app(self.serve_request)\n\n self._wanted_epoch = _timestamp() # The version of the site that started building.\n self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.\n self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.\n\n self._to_rebuild = {} # Used as an ordered set of functions to call.\n self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.\n\n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n self.observer = watchdog.observers.polling.PollingObserver(timeout=polling_interval)\n\n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n path = os.path.abspath(path)\n if func in (None, self.builder):\n func = self.builder\n else:\n warnings.warn(\n \"Plugins should not pass the 'func' parameter of watch(). 
\"\n \"The ability to execute custom callbacks will be removed soon.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n def callback(event):\n if event.is_directory:\n return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n\n handler = watchdog.events.FileSystemEventHandler()\n handler.on_any_event = callback\n log.debug(f\"Watching '{path}'\")\n self.observer.schedule(handler, path, recursive=recursive)\n\n def serve(self):\n self.observer.start()\n\n log.info(f\"Serving on {self.url}\")\n self.serve_thread.start()\n\n self._build_loop()\n\n def _build_loop(self):\n while True:\n with self._rebuild_cond:\n while not self._rebuild_cond.wait_for(\n lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay\n ):\n # We could have used just one wait instead of a loop + timeout, but we need\n # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.\n pass\n if self._shutdown:\n break\n log.info(\"Detected file changes\")\n while self._rebuild_cond.wait(timeout=self.build_delay):\n log.debug(\"Waiting for file changes to stop happening\")\n\n self._wanted_epoch = _timestamp()\n funcs = list(self._to_rebuild)\n self._to_rebuild.clear()\n\n for func in funcs:\n func()\n\n with self._epoch_cond:\n log.info(\"Reloading browsers\")\n self._visible_epoch = self._wanted_epoch\n self._epoch_cond.notify_all()\n\n def shutdown(self):\n self.observer.stop()\n with self._rebuild_cond:\n self._shutdown = True\n self._rebuild_cond.notify_all()\n\n if self.serve_thread.is_alive():\n super().shutdown()\n self.serve_thread.join()\n self.observer.join()\n\n def serve_request(self, environ, start_response):\n try:\n result = self._serve_request(environ, start_response)\n except Exception:\n code = 500\n msg = \"500 Internal Server Error\"\n log.exception(msg)\n else:\n if result is not None:\n return result\n code = 404\n msg = \"404 Not Found\"\n\n error_content = None\n try:\n error_content = self.error_handler(code)\n except Exception:\n log.exception(\"Failed to render an error message!\")\n if error_content is None:\n error_content = msg.encode()\n\n start_response(msg, [(\"Content-Type\", \"text/html\")])\n return [error_content]\n\n def _serve_request(self, environ, start_response):\n path = environ[\"PATH_INFO\"]\n\n m = re.fullmatch(r\"/livereload/([0-9]+)/[0-9]+\", path)\n if m:\n epoch = int(m[1])\n start_response(\"200 OK\", [(\"Content-Type\", \"text/plain\")])\n\n def condition():\n return self._visible_epoch > epoch\n\n with self._epoch_cond:\n if not condition():\n # Stall the browser, respond as soon as there's something new.\n # If there's not, respond anyway after a minute.\n self._log_poll_request(environ.get(\"HTTP_REFERER\"), request_id=path)\n self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)\n return [b\"%d\" % self._visible_epoch]\n\n if path == \"/js/livereload.js\":\n file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"livereload.js\")\n elif path.startswith(self.mount_path):\n if path.endswith(\"/\"):\n path += \"index.html\"\n path = path[len(self.mount_path):]\n file_path = os.path.join(self.root, path.lstrip(\"/\"))\n elif path == \"/\":\n start_response(\"302 Found\", [(\"Location\", self.mount_path)])\n return []\n else:\n return None # Not found\n\n # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.\n with self._epoch_cond:\n self._epoch_cond.wait_for(lambda: self._visible_epoch == 
self._wanted_epoch)\n epoch = self._visible_epoch\n\n try:\n file = open(file_path, \"rb\")\n except OSError:\n return None # Not found\n\n if path.endswith(\".html\"):\n with file:\n content = file.read()\n content = self._inject_js_into_html(content, epoch)\n file = io.BytesIO(content)\n content_length = len(content)\n else:\n content_length = os.path.getsize(file_path)\n\n content_type = self._guess_type(file_path)\n start_response(\n \"200 OK\", [(\"Content-Type\", content_type), (\"Content-Length\", str(content_length))]\n )\n return wsgiref.util.FileWrapper(file)\n\n @classmethod\n def _inject_js_into_html(cls, content, epoch):\n try:\n body_end = content.rindex(b\"</body>\")\n except ValueError:\n body_end = len(content)\n # The page will reload if the livereload poller returns a newer epoch than what it knows.\n # The other timestamp becomes just a unique identifier for the initiating page.\n return (\n b'%b<script src=\"/js/livereload.js\"></script><script>livereload(%d, %d);</script>%b'\n % (content[:body_end], epoch, _timestamp(), content[body_end:])\n )\n\n @classmethod\n @functools.lru_cache() # \"Cache\" to not repeat the same message for the same browser tab.\n def _log_poll_request(cls, url, request_id):\n log.info(f\"Browser connected: {url}\")\n\n def _guess_type(cls, path):\n # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).\n # Other uncommon types will not be accepted.\n if path.endswith((\".js\", \".JS\")):\n return \"application/javascript\"\n if path.endswith(\".gz\"):\n return \"application/gzip\"\n\n guess, _ = mimetypes.guess_type(path)\n if guess:\n return guess\n return \"application/octet-stream\"\n\n\nclass _Handler(wsgiref.simple_server.WSGIRequestHandler):\n def log_request(self, code=\"-\", size=\"-\"):\n level = logging.DEBUG if str(code) == \"200\" else logging.WARNING\n log.log(level, f'\"{self.requestline}\" code {code}')\n\n def log_message(self, format, *args):\n log.debug(format, *args)\n\n\ndef _timestamp():\n return round(time.monotonic() * 1000)\n", "path": "mkdocs/livereload/__init__.py"}]} | 3,570 | 985 |
gh_patches_debug_1194 | rasdani/github-patches | git_diff | pytorch__TensorRT-371 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🐛 [Bug] An error occurs in CompileGraph when gpu_id == 1
When I tried to compile on the second GPU in a multi-GPU environment, an error occurred. The code sample used is as follows.
```cpp
void load(const std::string& model_path, int64_t gpu_id, int64_t opt_batch_size) {
torch::jit::Module module = torch::jit::load(model_path);
torch::Device device = (torch::cuda::is_available() ? torch::Device(torch::kCUDA, gpu_id) : torch::Device(torch::kCPU));
module.to(device, torch::kHalf);
module.eval();
std::vector<int64_t> in_opt = { opt_batch_size, INPUT_CHANNEL_NUM, BOARD_WIDTH, BOARD_WIDTH };
trtorch::CompileSpec::InputRange range(in_opt);
trtorch::CompileSpec info({ range });
info.op_precision = torch::kHalf;
info.device.gpu_id = gpu_id;
module = trtorch::CompileGraph(module, info);
}
```
#### Error1
I called this function with gpu_id = 1. I got the following error:
```
terminate called after throwing an instance of 'trtorch::Error'
what(): [enforce fail at core/conversion/conversionctx/ConversionCtx.cpp:107] Expected cudaSetDevice(settings.device.gpu_id) to be true but got false
Unable to set gpu id: 1
```
I think this line is the cause.
https://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/conversion/conversionctx/ConversionCtx.cpp#L112
`cudaSetDevice` returns `cudaSuccess` (= 0) on success. However, `TRTORCH_CHECK` evaluates its condition as a Boolean, so the successful return value of 0 is treated as a failure.
I fixed it as follows and rebuilt it so that this error disappeared.
```diff
diff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp
index ff23692..bc5bf68 100644
--- a/core/conversion/conversionctx/ConversionCtx.cpp
+++ b/core/conversion/conversionctx/ConversionCtx.cpp
@@ -109,7 +109,7 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
cfg->setEngineCapability(settings.capability);
if (settings.device.gpu_id) {
- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), "Unable to set gpu id: " << settings.device.gpu_id);
+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, "Unable to set gpu id: " << settings.device.gpu_id);
}
if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {
```
You may also use `set_device`.
https://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/compiler.cpp#L176-L178
#### Error2
After making the above fix, I get the following error:
```
ERROR: [TRTorch Conversion Context] - Builder was created on device different than current device.
```
I moved the `cudaSetDevice` call to the beginning of the function and it worked fine.
```diff
diff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp
index ff23692..09a419c 100644
--- a/core/conversion/conversionctx/ConversionCtx.cpp
+++ b/core/conversion/conversionctx/ConversionCtx.cpp
@@ -47,6 +47,10 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
util::logging::get_logger().get_reportable_severity(),
util::logging::get_logger().get_is_colored_output_on()) {
// TODO: Support FP16 and FP32 from JIT information
+ if (settings.device.gpu_id) {
+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, "Unable to set gpu id: " << settings.device.gpu_id);
+ }
+
builder = nvinfer1::createInferBuilder(logger);
net = builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));
@@ -108,10 +112,6 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)
cfg->setDefaultDeviceType(settings.device.device_type);
cfg->setEngineCapability(settings.capability);
- if (settings.device.gpu_id) {
- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), "Unable to set gpu id: " << settings.device.gpu_id);
- }
-
if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {
auto nbDLACores = builder->getNbDLACores();
TRTORCH_CHECK(
```
It's working, but I'm not sure if this is a good fix as there may be other side effects as well.
I would appreciate it if you could respond appropriately.
--- END ISSUE ---
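
Alongside the C++ changes proposed above, the Python-side patch at the end of this record adds a `set_device` helper to the trtorch module. A hedged usage sketch for the multi-GPU case (the model path and input shape are placeholders, and `trtorch.set_device` assumes that patch is applied):

```python
import torch
import trtorch

gpu_id = 1
trtorch.set_device(gpu_id)  # added by the patch below; wraps trtorch._C.set_device

device = torch.device("cuda", gpu_id)
module = torch.jit.load("model.ts")      # placeholder TorchScript model
module.to(device, torch.half)
module.eval()

compile_spec = {
    "input_shapes": [(8, 3, 224, 224)],  # placeholder static shape
    "op_precision": torch.half,
    "device": {
        "device_type": torch.device("cuda"),
        "gpu_id": gpu_id,
    },
}
trt_module = trtorch.compile(module, compile_spec)
```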
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py/trtorch/_compiler.py`
Content:
```
1 from typing import List, Dict, Any
2 import torch
3 from torch import nn
4
5 import trtorch._C
6 from trtorch._compile_spec import _parse_compile_spec
7 from trtorch._version import __version__
8 from types import FunctionType
9
10
11 def compile(module: torch.jit.ScriptModule, compile_spec: Any) -> torch.jit.ScriptModule:
12 """Compile a TorchScript module for NVIDIA GPUs using TensorRT
13
14 Takes a existing TorchScript module and a set of settings to configure the compiler
15 and will convert methods to JIT Graphs which call equivalent TensorRT engines
16
17 Converts specifically the forward method of a TorchScript Module
18
19 Args:
20 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
21 ``torch.nn.Module``
22 compile_spec (dict): Compilation settings including operating precision, target device, etc.
23 One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs
24 to the graph. All other keys are optional
25
26 .. code-block:: py
27
28 compile_spec = {
29 "input_shapes": [
30 (1, 3, 224, 224), # Static input shape for input #1
31 {
32 "min": (1, 3, 224, 224),
33 "opt": (1, 3, 512, 512),
34 "max": (1, 3, 1024, 1024)
35 } # Dynamic input shape for input #2
36 ],
37 "device": {
38 "device_type": torch.device("cuda"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)
39 "gpu_id": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)
40 "dla_core": 0, # (DLA only) Target dla core id to run engine
41 "allow_gpu_fallback": false, # (DLA only) Allow layers unsupported on DLA to run on GPU
42 },
43 "op_precision": torch.half, # Operating precision set to FP16
44 "refit": false, # enable refit
45 "debug": false, # enable debuggable engine
46 "strict_types": false, # kernels should strictly run in operating precision
47 "capability": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels
48 "num_min_timing_iters": 2, # Number of minimization timing iterations used to select kernels
49 "num_avg_timing_iters": 1, # Number of averaging timing iterations used to select kernels
50 "workspace_size": 0, # Maximum size of workspace given to TensorRT
51 "max_batch_size": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)
52 }
53
54 Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using
55 torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum
56 to select device type.
57
58 Returns:
59 torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT
60 """
61
62 if isinstance(module, torch.jit.ScriptFunction):
63 raise TypeError(
64 "torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile")
65
66 compiled_cpp_mod = trtorch._C.compile_graph(module._c, _parse_compile_spec(compile_spec))
67 compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)
68 return compiled_module
69
70
71 def convert_method_to_trt_engine(module: torch.jit.ScriptModule, method_name: str, compile_spec: Any) -> str:
72 """Convert a TorchScript module method to a serialized TensorRT engine
73
74 Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings
75
76 Args:
77 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
78 ``torch.nn.Module``
79 method_name (str): Name of method to convert
80 compile_spec (dict): Compilation settings including operating precision, target device, etc.
81 One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs
82 to the graph. All other keys are optional
83
84 .. code-block:: py
85
86 CompileSpec = {
87 "input_shapes": [
88 (1, 3, 224, 224), # Static input shape for input #1
89 {
90 "min": (1, 3, 224, 224),
91 "opt": (1, 3, 512, 512),
92 "max": (1, 3, 1024, 1024)
93 } # Dynamic input shape for input #2
94 ],
95 "device": {
96 "device_type": torch.device("cuda"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)
97 "gpu_id": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)
98 "dla_core": 0, # (DLA only) Target dla core id to run engine
99 "allow_gpu_fallback": false, # (DLA only) Allow layers unsupported on DLA to run on GPU
100 },
101 "op_precision": torch.half, # Operating precision set to FP16
102 "disable_tf32": False, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas
103 "refit": false, # enable refit
104 "debug": false, # enable debuggable engine
105 "strict_types": false, # kernels should strictly run in operating precision
106 "capability": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels
107 "num_min_timing_iters": 2, # Number of minimization timing iterations used to select kernels
108 "num_avg_timing_iters": 1, # Number of averaging timing iterations used to select kernels
109 "workspace_size": 0, # Maximum size of workspace given to TensorRT
110 "max_batch_size": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)
111 }
112
113 Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using
114 torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum
115 to select device type.
116
117 Returns:
118 bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs
119 """
120 if isinstance(module, torch.jit.ScriptFunction):
121 raise TypeError(
122 "torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile")
123
124 return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))
125
126
127 def check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:
128 """Checks to see if a method is fully supported by TRTorch
129
130 Checks if a method of a TorchScript module can be compiled by TRTorch, if not, a list of operators
131 that are not supported are printed out and the function returns false, else true.
132
133 Args:
134 module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch
135 ``torch.nn.Module``
136 method_name (str): Name of method to check
137
138 Returns:
139 bool: True if supported Method
140 """
141 return trtorch._C.check_method_op_support(module._c, method_name)
142
143
144 def dump_build_info():
145 """Prints build information about the TRTorch distribution to stdout
146 """
147 print(get_build_info())
148
149
150 def get_build_info() -> str:
151 """Returns a string containing the build information of TRTorch distribution
152
153 Returns:
154 str: String containing the build information for TRTorch distribution
155 """
156 build_info = trtorch._C.get_build_info()
157 build_info = "TRTorch Version: " + str(__version__) + '\n' + build_info
158 return build_info
159
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py/trtorch/_compiler.py b/py/trtorch/_compiler.py
--- a/py/trtorch/_compiler.py
+++ b/py/trtorch/_compiler.py
@@ -156,3 +156,6 @@
build_info = trtorch._C.get_build_info()
build_info = "TRTorch Version: " + str(__version__) + '\n' + build_info
return build_info
+
+def set_device(gpu_id):
+ trtorch._C.set_device(gpu_id)
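
For illustration, here is a minimal usage sketch of the helper this patch adds. It assumes the package re-exports `set_device` and `compile` from `_compiler.py`; the script module, input shape, and GPU id below are placeholders, not values taken from the issue.

```python
import torch
import trtorch


def compile_on_secondary_gpu(script_module: torch.jit.ScriptModule, gpu_id: int = 1):
    # Make the target GPU current before building the TensorRT engine,
    # mirroring the cudaSetDevice fix described in the issue.
    trtorch.set_device(gpu_id)

    compile_spec = {
        "input_shapes": [(1, 3, 224, 224)],   # placeholder static shape
        "op_precision": torch.half,
        "device": {
            "device_type": torch.device("cuda"),
            "gpu_id": gpu_id,
        },
    }
    return trtorch.compile(script_module, compile_spec)
```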
| {"golden_diff": "diff --git a/py/trtorch/_compiler.py b/py/trtorch/_compiler.py\n--- a/py/trtorch/_compiler.py\n+++ b/py/trtorch/_compiler.py\n@@ -156,3 +156,6 @@\n build_info = trtorch._C.get_build_info()\n build_info = \"TRTorch Version: \" + str(__version__) + '\\n' + build_info\n return build_info\n+\n+def set_device(gpu_id):\n+ trtorch._C.set_device(gpu_id)\n", "issue": "\ud83d\udc1b [Bug] An error occurs in CompileGraph when gpu_id == 1\nWhen I tried to Complie on the second GPU in a multi-GPU environment, an error occurred. The code sample used is as follows.\r\n\r\n```cpp\r\nvoid load(const std::string& model_path, int64_t gpu_id, int64_t opt_batch_size) {\r\n torch::jit::Module module = torch::jit::load(model_path);\r\n torch::Device device = (torch::cuda::is_available() ? torch::Device(torch::kCUDA, gpu_id) : torch::Device(torch::kCPU));\r\n module.to(device, torch::kHalf);\r\n module.eval();\r\n\r\n std::vector<int64_t> in_opt = { opt_batch_size, INPUT_CHANNEL_NUM, BOARD_WIDTH, BOARD_WIDTH };\r\n\r\n trtorch::CompileSpec::InputRange range(in_opt);\r\n trtorch::CompileSpec info({ range });\r\n info.op_precision = torch::kHalf;\r\n info.device.gpu_id = gpu_id;\r\n module = trtorch::CompileGraph(module, info);\r\n}\r\n```\r\n\r\n#### Error1\r\nI called this function with gpu_id = 1. I got the following error:\r\n\r\n```\r\nterminate called after throwing an instance of 'trtorch::Error'\r\n what(): [enforce fail at core/conversion/conversionctx/ConversionCtx.cpp:107] Expected cudaSetDevice(settings.device.gpu_id) to be true but got false\r\nUnable to set gpu id: 1\r\n```\r\n\r\nI think this line is the cause.\r\nhttps://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/conversion/conversionctx/ConversionCtx.cpp#L112\r\n\r\n`cudaSetDevice` returns `cudaSuccess` (= 0) on success. 
However, `TRTORCH_CHECK` judges success or failure as a Boolean type.\r\n\r\nI fixed it as follows and rebuilt it so that this error disappeared.\r\n\r\n```diff\r\ndiff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp\r\nindex ff23692..bc5bf68 100644\r\n--- a/core/conversion/conversionctx/ConversionCtx.cpp\r\n+++ b/core/conversion/conversionctx/ConversionCtx.cpp\r\n@@ -109,7 +109,7 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)\r\n cfg->setEngineCapability(settings.capability);\r\n \r\n if (settings.device.gpu_id) {\r\n- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), \"Unable to set gpu id: \" << settings.device.gpu_id);\r\n+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, \"Unable to set gpu id: \" << settings.device.gpu_id);\r\n }\r\n \r\n if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {\r\n```\r\n\r\nYou may also use `set_device`.\r\n\r\nhttps://github.com/NVIDIA/TRTorch/blob/1d4b967a28e36beee048703f5645ee6fcc95793d/core/compiler.cpp#L176-L178\r\n\r\n#### Error2\r\nAfter making the above fix, I get the following error:\r\n\r\n```\r\nERROR: [TRTorch Conversion Context] - Builder was created on device different than current device.\r\n```\r\n\r\nI changed `cudaSetDevice` to do it at the beginning of the function and it worked fine.\r\n\r\n```diff\r\ndiff --git a/core/conversion/conversionctx/ConversionCtx.cpp b/core/conversion/conversionctx/ConversionCtx.cpp\r\nindex ff23692..09a419c 100644\r\n--- a/core/conversion/conversionctx/ConversionCtx.cpp\r\n+++ b/core/conversion/conversionctx/ConversionCtx.cpp\r\n@@ -47,6 +47,10 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)\r\n util::logging::get_logger().get_reportable_severity(),\r\n util::logging::get_logger().get_is_colored_output_on()) {\r\n // TODO: Support FP16 and FP32 from JIT information\r\n+ if (settings.device.gpu_id) {\r\n+ TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id) == cudaSuccess, \"Unable to set gpu id: \" << settings.device.gpu_id);\r\n+ }\r\n+\r\n builder = nvinfer1::createInferBuilder(logger);\r\n net = builder->createNetworkV2(1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH));\r\n \r\n@@ -108,10 +112,6 @@ ConversionCtx::ConversionCtx(BuilderSettings build_settings)\r\n cfg->setDefaultDeviceType(settings.device.device_type);\r\n cfg->setEngineCapability(settings.capability);\r\n \r\n- if (settings.device.gpu_id) {\r\n- TRTORCH_CHECK(cudaSetDevice(settings.device.gpu_id), \"Unable to set gpu id: \" << settings.device.gpu_id);\r\n- }\r\n-\r\n if (settings.device.device_type == nvinfer1::DeviceType::kDLA) {\r\n auto nbDLACores = builder->getNbDLACores();\r\n TRTORCH_CHECK(\r\n```\r\n\r\nIt's working, but I'm not sure if this is a good fix as there may be other side effects as well.\r\nI would appreciate it if you could respond appropriately.\n", "before_files": [{"content": "from typing import List, Dict, Any\nimport torch\nfrom torch import nn\n\nimport trtorch._C\nfrom trtorch._compile_spec import _parse_compile_spec\nfrom trtorch._version import __version__\nfrom types import FunctionType\n\n\ndef compile(module: torch.jit.ScriptModule, compile_spec: Any) -> torch.jit.ScriptModule:\n \"\"\"Compile a TorchScript module for NVIDIA GPUs using TensorRT\n\n Takes a existing TorchScript module and a set of settings to configure the compiler\n and will convert methods to JIT Graphs which call equivalent TensorRT engines\n\n Converts specifically the forward 
method of a TorchScript Module\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. code-block:: py\n\n compile_spec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT\n \"\"\"\n\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile\")\n\n compiled_cpp_mod = trtorch._C.compile_graph(module._c, _parse_compile_spec(compile_spec))\n compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)\n return compiled_module\n\n\ndef convert_method_to_trt_engine(module: torch.jit.ScriptModule, method_name: str, compile_spec: Any) -> str:\n \"\"\"Convert a TorchScript module method to a serialized TensorRT engine\n\n Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to convert\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. 
code-block:: py\n\n CompileSpec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"disable_tf32\": False, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs\n \"\"\"\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile\")\n\n return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))\n\n\ndef check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:\n \"\"\"Checks to see if a method is fully supported by TRTorch\n\n Checks if a method of a TorchScript module can be compiled by TRTorch, if not, a list of operators\n that are not supported are printed out and the function returns false, else true.\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to check\n\n Returns:\n bool: True if supported Method\n \"\"\"\n return trtorch._C.check_method_op_support(module._c, method_name)\n\n\ndef dump_build_info():\n \"\"\"Prints build information about the TRTorch distribution to stdout\n \"\"\"\n print(get_build_info())\n\n\ndef get_build_info() -> str:\n \"\"\"Returns a string containing the build information of TRTorch distribution\n\n Returns:\n str: String containing the build information for TRTorch distribution\n \"\"\"\n build_info = trtorch._C.get_build_info()\n build_info = \"TRTorch Version: \" + str(__version__) + '\\n' + build_info\n return build_info\n", "path": "py/trtorch/_compiler.py"}], "after_files": [{"content": "from typing import List, Dict, Any\nimport torch\nfrom torch import nn\n\nimport trtorch._C\nfrom trtorch._compile_spec import _parse_compile_spec\nfrom trtorch._version import 
__version__\nfrom types import FunctionType\n\n\ndef compile(module: torch.jit.ScriptModule, compile_spec: Any) -> torch.jit.ScriptModule:\n \"\"\"Compile a TorchScript module for NVIDIA GPUs using TensorRT\n\n Takes a existing TorchScript module and a set of settings to configure the compiler\n and will convert methods to JIT Graphs which call equivalent TensorRT engines\n\n Converts specifically the forward method of a TorchScript Module\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. All other keys are optional\n\n .. code-block:: py\n\n compile_spec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n torch.jit.ScriptModule: Compiled TorchScript Module, when run it will execute via TensorRT\n \"\"\"\n\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunction currently is not directly supported, wrap the function in a module to compile\")\n\n compiled_cpp_mod = trtorch._C.compile_graph(module._c, _parse_compile_spec(compile_spec))\n compiled_module = torch.jit._recursive.wrap_cpp_module(compiled_cpp_mod)\n return compiled_module\n\n\ndef convert_method_to_trt_engine(module: torch.jit.ScriptModule, method_name: str, compile_spec: Any) -> str:\n \"\"\"Convert a TorchScript module method to a serialized TensorRT engine\n\n Converts a specified method of a module to a serialized TensorRT engine given a dictionary of conversion settings\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to convert\n compile_spec (dict): Compilation settings including operating precision, target device, etc.\n One key is required which is ``input_shapes``, describing the input sizes or ranges for inputs\n to the graph. 
All other keys are optional\n\n .. code-block:: py\n\n CompileSpec = {\n \"input_shapes\": [\n (1, 3, 224, 224), # Static input shape for input #1\n {\n \"min\": (1, 3, 224, 224),\n \"opt\": (1, 3, 512, 512),\n \"max\": (1, 3, 1024, 1024)\n } # Dynamic input shape for input #2\n ],\n \"device\": {\n \"device_type\": torch.device(\"cuda\"), # Type of device to run engine on (for DLA use trtorch.DeviceType.DLA)\n \"gpu_id\": 0, # Target gpu id to run engine (Use Xavier as gpu id for DLA)\n \"dla_core\": 0, # (DLA only) Target dla core id to run engine\n \"allow_gpu_fallback\": false, # (DLA only) Allow layers unsupported on DLA to run on GPU\n },\n \"op_precision\": torch.half, # Operating precision set to FP16\n \"disable_tf32\": False, # Force FP32 layers to use traditional as FP32 format vs the default behavior of rounding the inputs to 10-bit mantissas before multiplying, but accumulates the sum using 23-bit mantissas\n \"refit\": false, # enable refit\n \"debug\": false, # enable debuggable engine\n \"strict_types\": false, # kernels should strictly run in operating precision\n \"capability\": trtorch.EngineCapability.DEFAULT, # Restrict kernel selection to safe gpu kernels or safe dla kernels\n \"num_min_timing_iters\": 2, # Number of minimization timing iterations used to select kernels\n \"num_avg_timing_iters\": 1, # Number of averaging timing iterations used to select kernels\n \"workspace_size\": 0, # Maximum size of workspace given to TensorRT\n \"max_batch_size\": 0, # Maximum batch size (must be >= 1 to be set, 0 means not set)\n }\n\n Input Sizes can be specified as torch sizes, tuples or lists. Op precisions can be specified using\n torch datatypes or trtorch datatypes and you can use either torch devices or the trtorch device type enum\n to select device type.\n\n Returns:\n bytes: Serialized TensorRT engine, can either be saved to a file or deserialized via TensorRT APIs\n \"\"\"\n if isinstance(module, torch.jit.ScriptFunction):\n raise TypeError(\n \"torch.jit.ScriptFunctions currently are not directly supported, wrap the function in a module to compile\")\n\n return trtorch._C.convert_graph_to_trt_engine(module._c, method_name, _parse_compile_spec(compile_spec))\n\n\ndef check_method_op_support(module: torch.jit.ScriptModule, method_name: str) -> bool:\n \"\"\"Checks to see if a method is fully supported by TRTorch\n\n Checks if a method of a TorchScript module can be compiled by TRTorch, if not, a list of operators\n that are not supported are printed out and the function returns false, else true.\n\n Args:\n module (torch.jit.ScriptModule): Source module, a result of tracing or scripting a PyTorch\n ``torch.nn.Module``\n method_name (str): Name of method to check\n\n Returns:\n bool: True if supported Method\n \"\"\"\n return trtorch._C.check_method_op_support(module._c, method_name)\n\n\ndef dump_build_info():\n \"\"\"Prints build information about the TRTorch distribution to stdout\n \"\"\"\n print(get_build_info())\n\n\ndef get_build_info() -> str:\n \"\"\"Returns a string containing the build information of TRTorch distribution\n\n Returns:\n str: String containing the build information for TRTorch distribution\n \"\"\"\n build_info = trtorch._C.get_build_info()\n build_info = \"TRTorch Version: \" + str(__version__) + '\\n' + build_info\n return build_info\n\ndef set_device(gpu_id):\n trtorch._C.set_device(gpu_id)\n", "path": "py/trtorch/_compiler.py"}]} | 3,644 | 112 |
gh_patches_debug_10948 | rasdani/github-patches | git_diff | dmlc__dgl-1305 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: module 'dgl.nn' has no attribute 'pytorch'
## 🐛 Bug
When I try to use some of the predefined modules of DGL with the following code, I encounter the error: AttributeError: module 'dgl.nn' has no attribute 'pytorch'.
Similar problems also happen to other backends, including TensorFlow and MXNet.
## To Reproduce
Steps to reproduce the behavior:
```python
import dgl.nn
# or import dgl
c = dgl.nn.pytorch.conv.GraphConv(10,2)
```
## Expected behavior
The code should generate a GraphConv layer without any error.
## Environment
- DGL Version (e.g., 1.0): 0.4.2
- Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): Pytorch 1.4.0
 - OS (e.g., Linux): Irrelevant
- How you installed DGL (`conda`, `pip`, source): conda
- Build command you used (if compiling from source):
- Python version: 3.7
 - CUDA/cuDNN version (if applicable): Irrelevant
 - GPU models and configuration (e.g. V100): Irrelevant
- Any other relevant information:
## Additional context
I read the source code and **found the reason and solution** to this problem.
### Reason:
The `__init__.py` file of `dgl/nn` is empty. Therefore, when I import `dgl` or `dgl.nn`, Python cannot automatically find the backend-specific sub-packages underneath it.
I verified it by the following code:
```python
import dgl.nn.pytorch
c = dgl.nn.pytorch.conv.GraphConv(10,2)
```
It works fine.
### Solution:
Add three lines of code like `from . import pytorch` to `dgl/nn/__init__.py`, one for PyTorch and one for each of the other two backends.
It is better to automatically detect the backend library and import the correct sub-directory.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/dgl/__init__.py`
Content:
```
1 """DGL root package."""
2 # Windows compatibility
3 # This initializes Winsock and performs cleanup at termination as required
4 import socket
5
6 # Need to ensure that the backend framework is imported before load dgl libs,
7 # otherwise weird cuda problem happens
8 from .backend import load_backend
9
10 from . import function
11 from . import nn
12 from . import contrib
13 from . import container
14 from . import random
15 from . import sampling
16
17 from ._ffi.runtime_ctypes import TypeCode
18 from ._ffi.function import register_func, get_global_func, list_global_func_names, extract_ext_funcs
19 from ._ffi.base import DGLError, __version__
20
21 from .base import ALL, NTYPE, NID, ETYPE, EID
22 from .readout import *
23 from .batched_heterograph import *
24 from .convert import *
25 from .graph import DGLGraph, batch, unbatch
26 from .generators import *
27 from .heterograph import DGLHeteroGraph
28 from .nodeflow import *
29 from .traversal import *
30 from .transform import *
31 from .propagate import *
32 from .udf import NodeBatch, EdgeBatch
33
```
Path: `python/dgl/nn/__init__.py`
Content:
```
1 """Package for neural network common components."""
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/dgl/__init__.py b/python/dgl/__init__.py
--- a/python/dgl/__init__.py
+++ b/python/dgl/__init__.py
@@ -8,7 +8,6 @@
from .backend import load_backend
from . import function
-from . import nn
from . import contrib
from . import container
from . import random
diff --git a/python/dgl/nn/__init__.py b/python/dgl/nn/__init__.py
--- a/python/dgl/nn/__init__.py
+++ b/python/dgl/nn/__init__.py
@@ -1 +1,12 @@
"""Package for neural network common components."""
+import importlib
+import sys
+from ..backend import backend_name
+
+def _load_backend(mod_name):
+ mod = importlib.import_module('.%s' % mod_name, __name__)
+ thismod = sys.modules[__name__]
+ for api, obj in mod.__dict__.items():
+ setattr(thismod, api, obj)
+
+_load_backend(backend_name)
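
As a quick sanity check, the sketch below shows what the patched `dgl/nn/__init__.py` is expected to enable. It assumes a PyTorch build of DGL and that the `dgl.nn.pytorch` package re-exports `GraphConv` from its `conv` module, as it does in the 0.4.x series.

```python
import dgl
import dgl.nn

# The call from the issue now resolves, because importing dgl.nn loads the
# active backend sub-package as a side effect.
conv = dgl.nn.pytorch.conv.GraphConv(10, 2)

# The backend symbols are also copied onto dgl.nn itself by _load_backend.
same_conv = dgl.nn.GraphConv(10, 2)
print(type(conv), type(same_conv))
```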
| {"golden_diff": "diff --git a/python/dgl/__init__.py b/python/dgl/__init__.py\n--- a/python/dgl/__init__.py\n+++ b/python/dgl/__init__.py\n@@ -8,7 +8,6 @@\n from .backend import load_backend\n \n from . import function\n-from . import nn\n from . import contrib\n from . import container\n from . import random\ndiff --git a/python/dgl/nn/__init__.py b/python/dgl/nn/__init__.py\n--- a/python/dgl/nn/__init__.py\n+++ b/python/dgl/nn/__init__.py\n@@ -1 +1,12 @@\n \"\"\"Package for neural network common components.\"\"\"\n+import importlib\n+import sys\n+from ..backend import backend_name\n+\n+def _load_backend(mod_name):\n+ mod = importlib.import_module('.%s' % mod_name, __name__)\n+ thismod = sys.modules[__name__]\n+ for api, obj in mod.__dict__.items():\n+ setattr(thismod, api, obj)\n+\n+_load_backend(backend_name)\n", "issue": "AttributeError: module 'dgl.nn' has no attribute 'pytorch'\n## \ud83d\udc1b Bug\r\n\r\nWhen I try to use some of the predefined modules of dgl by the following code, I encounter an error: AttributeError: module 'dgl.nn' has no attribute 'pytorch'.\r\n\r\nSimilar problems also happen to other backends, including TensorFlow and MXNet.\r\n\r\n## To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\n```python\r\nimport dgl.nn\r\n# or import dgl\r\nc = dgl.nn.pytorch.conv.GraphConv(10,2)\r\n```\r\n\r\n## Expected behavior\r\n\r\nThe code should generate a GraphConv layer without any error.\r\n\r\n## Environment\r\n\r\n - DGL Version (e.g., 1.0): 0.4.2\r\n - Backend Library & Version (e.g., PyTorch 0.4.1, MXNet/Gluon 1.3): Pytorch 1.4.0\r\n - OS (e.g., Linux): Irrelelevent \r\n - How you installed DGL (`conda`, `pip`, source): conda\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.7\r\n - CUDA/cuDNN version (if applicable): Irrelelevent \r\n - GPU models and configuration (e.g. V100): Irrelelevent \r\n - Any other relevant information:\r\n\r\n## Additional context\r\n\r\nI read the source code and **found the reason and solution** to this problem.\r\n\r\n### Reason:\r\n\r\nThe `__init__.py` file of `dgl/nn` is empty. Therefore, if i import dgl or dgl.nn, python cannot automatically find files of its sub-directories.\r\n\r\nI verified it by the following code:\r\n```python\r\nimport dgl.nn.python\r\nc = dgl.nn.pytorch.conv.GraphConv(10,2)\r\n```\r\nIt works fine.\r\n\r\n### Solution:\r\n\r\nAdd 3 lines of code like `from . import pytorch` into file `dgl/nn/__init__.py` for PyTorch and the other 2 backends.\r\nIt is better to automatically detect the backend library and import the correct sub-directory.\n", "before_files": [{"content": "\"\"\"DGL root package.\"\"\"\n# Windows compatibility\n# This initializes Winsock and performs cleanup at termination as required\nimport socket\n\n# Need to ensure that the backend framework is imported before load dgl libs,\n# otherwise weird cuda problem happens\nfrom .backend import load_backend\n\nfrom . import function\nfrom . import nn\nfrom . import contrib\nfrom . import container\nfrom . import random\nfrom . 
import sampling\n\nfrom ._ffi.runtime_ctypes import TypeCode\nfrom ._ffi.function import register_func, get_global_func, list_global_func_names, extract_ext_funcs\nfrom ._ffi.base import DGLError, __version__\n\nfrom .base import ALL, NTYPE, NID, ETYPE, EID\nfrom .readout import *\nfrom .batched_heterograph import *\nfrom .convert import *\nfrom .graph import DGLGraph, batch, unbatch\nfrom .generators import *\nfrom .heterograph import DGLHeteroGraph\nfrom .nodeflow import *\nfrom .traversal import *\nfrom .transform import *\nfrom .propagate import *\nfrom .udf import NodeBatch, EdgeBatch\n", "path": "python/dgl/__init__.py"}, {"content": "\"\"\"Package for neural network common components.\"\"\"\n", "path": "python/dgl/nn/__init__.py"}], "after_files": [{"content": "\"\"\"DGL root package.\"\"\"\n# Windows compatibility\n# This initializes Winsock and performs cleanup at termination as required\nimport socket\n\n# Need to ensure that the backend framework is imported before load dgl libs,\n# otherwise weird cuda problem happens\nfrom .backend import load_backend\n\nfrom . import function\nfrom . import contrib\nfrom . import container\nfrom . import random\nfrom . import sampling\n\nfrom ._ffi.runtime_ctypes import TypeCode\nfrom ._ffi.function import register_func, get_global_func, list_global_func_names, extract_ext_funcs\nfrom ._ffi.base import DGLError, __version__\n\nfrom .base import ALL, NTYPE, NID, ETYPE, EID\nfrom .readout import *\nfrom .batched_heterograph import *\nfrom .convert import *\nfrom .graph import DGLGraph, batch, unbatch\nfrom .generators import *\nfrom .heterograph import DGLHeteroGraph\nfrom .nodeflow import *\nfrom .traversal import *\nfrom .transform import *\nfrom .propagate import *\nfrom .udf import NodeBatch, EdgeBatch\n", "path": "python/dgl/__init__.py"}, {"content": "\"\"\"Package for neural network common components.\"\"\"\nimport importlib\nimport sys\nfrom ..backend import backend_name\n\ndef _load_backend(mod_name):\n mod = importlib.import_module('.%s' % mod_name, __name__)\n thismod = sys.modules[__name__]\n for api, obj in mod.__dict__.items():\n setattr(thismod, api, obj)\n\n_load_backend(backend_name)\n", "path": "python/dgl/nn/__init__.py"}]} | 1,034 | 234 |
gh_patches_debug_14394 | rasdani/github-patches | git_diff | comic__grand-challenge.org-1728 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Annotation answers get parsed incorrectly in csv export
For annotation-type answers, the CSV export currently looks like this:

It appears the annotation JSON gets split apart as part of the export. We should probably add some escaping.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/core/renderers.py`
Content:
```
1 from rest_framework_csv.renderers import CSVRenderer
2
3
4 class PaginatedCSVRenderer(CSVRenderer):
5 results_field = "results"
6
7 def render(self, data, *args, **kwargs):
8 if self.results_field in data:
9 data = data[self.results_field]
10
11 return super().render(data, *args, **kwargs)
12
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py
--- a/app/grandchallenge/core/renderers.py
+++ b/app/grandchallenge/core/renderers.py
@@ -1,3 +1,5 @@
+import json
+
from rest_framework_csv.renderers import CSVRenderer
@@ -9,3 +11,19 @@
data = data[self.results_field]
return super().render(data, *args, **kwargs)
+
+ def flatten_data(self, data):
+ """
+ Create a dictionary that is 1 level deep, with nested values serialized
+ as json. This means that the header rows are now consistent.
+ """
+ for row in data:
+ flat_row = {k: self._flatten_value(v) for k, v in row.items()}
+ yield flat_row
+
+ @staticmethod
+ def _flatten_value(value):
+ if isinstance(value, (dict, list)):
+ return json.dumps(value)
+ else:
+ return value
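
The idea behind the patch can be reproduced outside the project with a few lines of standard-library code. The sketch below is not the project code; the field names and sample rows are invented for illustration, but it shows why serializing nested values to JSON keeps each answer in a single, properly escaped CSV cell.

```python
import csv
import io
import json


def flatten_row(row):
    # Nested dicts/lists become one JSON string per cell instead of
    # spilling across columns.
    return {k: json.dumps(v) if isinstance(v, (dict, list)) else v
            for k, v in row.items()}


rows = [
    {"question": "lesion location",
     "answer": {"type": "Point", "coordinates": [12.3, 45.6]}},
    {"question": "diagnosis", "answer": "benign"},
]

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=["question", "answer"])
writer.writeheader()
for row in rows:
    writer.writerow(flatten_row(row))
print(buf.getvalue())
```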
| {"golden_diff": "diff --git a/app/grandchallenge/core/renderers.py b/app/grandchallenge/core/renderers.py\n--- a/app/grandchallenge/core/renderers.py\n+++ b/app/grandchallenge/core/renderers.py\n@@ -1,3 +1,5 @@\n+import json\n+\n from rest_framework_csv.renderers import CSVRenderer\n \n \n@@ -9,3 +11,19 @@\n data = data[self.results_field]\n \n return super().render(data, *args, **kwargs)\n+\n+ def flatten_data(self, data):\n+ \"\"\"\n+ Create a dictionary that is 1 level deep, with nested values serialized\n+ as json. This means that the header rows are now consistent.\n+ \"\"\"\n+ for row in data:\n+ flat_row = {k: self._flatten_value(v) for k, v in row.items()}\n+ yield flat_row\n+\n+ @staticmethod\n+ def _flatten_value(value):\n+ if isinstance(value, (dict, list)):\n+ return json.dumps(value)\n+ else:\n+ return value\n", "issue": "Annotation answers get parsed incorrectly in csv export\nFor annotation type answers, the csv export looks like this currently:\r\n\r\n\r\nIt appears the annotation json gets part as part of the export. We should probably add some escaping.\n", "before_files": [{"content": "from rest_framework_csv.renderers import CSVRenderer\n\n\nclass PaginatedCSVRenderer(CSVRenderer):\n results_field = \"results\"\n\n def render(self, data, *args, **kwargs):\n if self.results_field in data:\n data = data[self.results_field]\n\n return super().render(data, *args, **kwargs)\n", "path": "app/grandchallenge/core/renderers.py"}], "after_files": [{"content": "import json\n\nfrom rest_framework_csv.renderers import CSVRenderer\n\n\nclass PaginatedCSVRenderer(CSVRenderer):\n results_field = \"results\"\n\n def render(self, data, *args, **kwargs):\n if self.results_field in data:\n data = data[self.results_field]\n\n return super().render(data, *args, **kwargs)\n\n def flatten_data(self, data):\n \"\"\"\n Create a dictionary that is 1 level deep, with nested values serialized\n as json. This means that the header rows are now consistent.\n \"\"\"\n for row in data:\n flat_row = {k: self._flatten_value(v) for k, v in row.items()}\n yield flat_row\n\n @staticmethod\n def _flatten_value(value):\n if isinstance(value, (dict, list)):\n return json.dumps(value)\n else:\n return value\n", "path": "app/grandchallenge/core/renderers.py"}]} | 474 | 228 |
gh_patches_debug_6909 | rasdani/github-patches | git_diff | ResonantGeoData__ResonantGeoData-436 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
expose STAC post endpoint in Python client
for https://github.com/ResonantGeoData/issue-dashboard/issues/11
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rgdc/rgdc/rgdc.py`
Content:
```
1 from base64 import b64encode
2 from dataclasses import dataclass
3 import getpass
4 from pathlib import Path
5 import tempfile
6 from typing import Dict, Iterator, List, Optional, Tuple, Union
7
8 from tqdm import tqdm
9
10 from .session import RgdcSession
11 from .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE
12 from .utils import (
13 DEFAULT_RGD_API,
14 download_checksum_file_to_path,
15 limit_offset_pager,
16 spatial_search_params,
17 spatial_subentry_id,
18 )
19
20
21 @dataclass
22 class RasterDownload:
23 path: Path
24 images: List[Path]
25 ancillary: List[Path]
26
27
28 class Rgdc:
29 def __init__(
30 self,
31 api_url: str = DEFAULT_RGD_API,
32 username: Optional[str] = None,
33 password: Optional[str] = None,
34 ):
35 """
36 Initialize a RGD Client.
37
38 Args:
39 api_url: The base url of the RGD API instance.
40 username: The username to authenticate to the instance with, if any.
41 password: The password associated with the provided username. If None, a prompt will be provided.
42
43 Returns:
44 A new Rgdc instance.
45 """
46 auth_header = None
47
48 # Prompt for password if not provided
49 if username is not None and password is None:
50 password = getpass.getpass()
51
52 if username and password:
53 encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()
54 auth_header = f'Basic {encoded_credentials}'
55
56 self.session = RgdcSession(base_url=api_url, auth_header=auth_header)
57
58 def list_image_tiles(self, image_id: Union[str, int]) -> Dict:
59 """List geodata imagery tiles."""
60 r = self.session.get(f'geoprocess/imagery/{image_id}/tiles')
61 return r.json()
62
63 def download_image_file(
64 self, image_id: Union[str, int], chunk_size: int = 1024 * 1024
65 ) -> Iterator[bytes]:
66 """
67 Download the associated ImageFile data for this ImageEntry directly from S3.
68
69 Args:
70 image_id: The ID of the ImageEntry to download.
71 chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).
72
73 Returns:
74 An iterator of byte chunks.
75 """
76 r = self.session.get(f'geodata/imagery/{image_id}/data', stream=True)
77 return r.iter_content(chunk_size=chunk_size)
78
79 def download_image_thumbnail(
80 self,
81 image_id: Union[str, int],
82 ) -> bytes:
83 """
84 Download the generated thumbnail for this ImageEntry.
85
86 Args:
87 image_id: The ID of the ImageEntry to download.
88
89 Returns:
90 Thumbnail bytes.
91 """
92 r = self.session.get(f'geoprocess/imagery/{image_id}/thumbnail')
93 return r.content
94
95 def download_raster_thumbnail(
96 self,
97 raster_meta_id: Union[str, int, dict],
98 band: int = 0,
99 ) -> bytes:
100 """
101 Download the generated thumbnail for this ImageEntry.
102
103 Args:
104 raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.
105 band: The index of the image in the raster's image set to produce thumbnail from.
106
107 Returns:
108 Thumbnail bytes.
109 """
110 if isinstance(raster_meta_id, dict):
111 raster_meta_id = spatial_subentry_id(raster_meta_id)
112
113 r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')
114 parent_raster = r.json().get('parent_raster', {})
115 images = parent_raster.get('image_set', {}).get('images', [])
116 try:
117 return self.download_image_thumbnail(images[band]['id'])
118 except IndexError:
119 raise IndexError(f'Band index ({band}) out of range.')
120
121 def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:
122 """Get raster entry detail.
123
124 Args:
125 stac: Optionally return as STAC Item dictionary/JSON.
126
127 Returns:
128 Serialized object representation.
129 """
130 if isinstance(raster_meta_id, dict):
131 raster_meta_id = spatial_subentry_id(raster_meta_id)
132
133 if stac:
134 r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}/stac')
135 else:
136 r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')
137 return r.json()
138
139 def download_raster(
140 self,
141 raster_meta_id: Union[str, int, dict],
142 pathname: Optional[str] = None,
143 nest_with_name: bool = False,
144 keep_existing: bool = True,
145 ) -> RasterDownload:
146 """
147 Download the image set associated with a raster entry to disk.
148
149 Args:
150 raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.
151 pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.
152 nest_with_name: If True, nests the download within an additional directory, using the raster entry name.
153 keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.
154
155 Returns:
156 A dictionary of the paths to all files downloaded under the directory.
157 """
158 if isinstance(raster_meta_id, dict):
159 raster_meta_id = spatial_subentry_id(raster_meta_id)
160
161 r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')
162 parent_raster = r.json().get('parent_raster', {})
163
164 # Create dirs after request to avoid empty dirs if failed
165 if pathname is None:
166 pathname = tempfile.mkdtemp()
167
168 # Handle optional nesting with raster entry name
169 path = Path(pathname)
170 parent_raster_name: Optional[str] = parent_raster.get('name')
171
172 if nest_with_name and parent_raster_name:
173 path = path / parent_raster_name
174
175 # Ensure base download directory exists
176 if not path.exists():
177 path.mkdir()
178
179 # Initialize dataclass
180 raster_download = RasterDownload(path, [], [])
181
182 # Download images
183 images = parent_raster.get('image_set', {}).get('images', [])
184 for image in tqdm(images, desc='Downloading image files'):
185 file = image.get('image_file', {}).get('file', {})
186 file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
187 if file_path:
188 raster_download.images.append(file_path)
189
190 # Download ancillary files
191 ancillary = parent_raster.get('ancillary_files', [])
192 for file in tqdm(ancillary, desc='Downloading ancillary files'):
193 file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
194 if file_path:
195 raster_download.ancillary.append(file_path)
196
197 return raster_download
198
199 def search(
200 self,
201 query: Optional[Union[Dict, str]] = None,
202 predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
203 relates: Optional[str] = None,
204 distance: Optional[Tuple[float, float]] = None,
205 acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
206 instrumentation: Optional[str] = None,
207 limit: Optional[int] = None,
208 offset: Optional[int] = None,
209 ) -> List[Dict]:
210 """
211 Search for geospatial entries based on various criteria.
212
213 For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
214 E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.
215
216 Args:
217 query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
218 predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
219 be used to filter data such that predicate(a, b) where b is the queried geometry.
220 relates: Specify exactly how the queried geometry should relate to the data using a
221 DE-9IM string code.
222 distance: The min/max distance around the queried geometry in meters.
223 acquired: The min/max date and time (ISO 8601) when data was acquired.
224 instrumentation: The instrumentation used to acquire at least one of these data.
225 limit: The maximum number of results to return.
226 offset: The number of results to skip.
227
228 Returns:
229 A list of Spatial Entries.
230 """
231 params = spatial_search_params(
232 query=query,
233 predicate=predicate,
234 relates=relates,
235 distance=distance,
236 acquired=acquired,
237 instrumentation=instrumentation,
238 limit=limit,
239 offset=offset,
240 )
241 return list(limit_offset_pager(self.session, 'geosearch', params=params))
242
243 def search_raster_stac(
244 self,
245 query: Optional[Union[Dict, str]] = None,
246 predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
247 relates: Optional[str] = None,
248 distance: Optional[Tuple[float, float]] = None,
249 acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
250 instrumentation: Optional[str] = None,
251 num_bands: Optional[Tuple[int, int]] = None,
252 resolution: Optional[Tuple[int, int]] = None,
253 cloud_cover: Optional[Tuple[float, float]] = None,
254 limit: Optional[int] = None,
255 offset: Optional[int] = None,
256 ) -> List[Dict]:
257 """
258 Search for raster entries based on various criteria.
259
260 For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
261 E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.
262
263 Args:
264 query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
265 predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
266 be used to filter data such that predicate(a, b) where b is the queried geometry.
267 relates: Specify exactly how the queried geometry should relate to the data using a
268 DE-9IM string code.
269 distance: The min/max distance around the queried geometry in meters.
270 acquired: The min/max date and time (ISO 8601) when data was acquired.
271 instrumentation: The instrumentation used to acquire at least one of these data.
272 num_bands: The min/max number of bands in the raster.
273 resolution: The min/max resolution of the raster.
274 cloud_cover: The min/max cloud coverage of the raster.
275 limit: The maximum number of results to return.
276 offset: The number of results to skip.
277
278 Returns:
279 A list of Spatial Entries in STAC Item format.
280 """
281 params = spatial_search_params(
282 query=query,
283 predicate=predicate,
284 relates=relates,
285 distance=distance,
286 acquired=acquired,
287 instrumentation=instrumentation,
288 limit=limit,
289 offset=offset,
290 )
291
292 if num_bands and len(num_bands) == 2:
293 nbmin, nbmax = num_bands
294 params['num_bands_min'] = nbmin
295 params['num_bands_max'] = nbmax
296
297 if resolution and len(resolution) == 2:
298 rmin, rmax = resolution
299 params['resolution_min'] = rmin
300 params['resolution_max'] = rmax
301
302 if cloud_cover and len(cloud_cover) == 2:
303 ccmin, ccmax = cloud_cover
304 params['cloud_cover_min'] = ccmin
305 params['cloud_cover_max'] = ccmax
306
307 return list(limit_offset_pager(self.session, 'geosearch/raster', params=params))
308
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rgdc/rgdc/rgdc.py b/rgdc/rgdc/rgdc.py
--- a/rgdc/rgdc/rgdc.py
+++ b/rgdc/rgdc/rgdc.py
@@ -240,6 +240,13 @@
)
return list(limit_offset_pager(self.session, 'geosearch', params=params))
+ def create_raster_stac(self, raster: Dict) -> Dict:
+ """Create a raster entry using STAC format."""
+ r = self.session.post('geodata/imagery/raster/stac', json=raster)
+ r.raise_for_status()
+
+ return r.json()
+
def search_raster_stac(
self,
query: Optional[Union[Dict, str]] = None,
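
A hypothetical usage sketch for the new client method follows. The API URL, username, and STAC item are placeholders, it assumes the package exposes `Rgdc` at the top level (otherwise `from rgdc.rgdc import Rgdc`), and the exact fields the server requires for a STAC item are not shown in this file.

```python
from rgdc import Rgdc

client = Rgdc(api_url="https://rgd.example.org/api", username="alice")  # prompts for password

stac_item = {
    "type": "Feature",
    "stac_version": "1.0.0",
    "id": "example-raster",
    "geometry": {"type": "Point", "coordinates": [0.0, 0.0]},
    "properties": {"datetime": "2021-01-01T00:00:00Z"},
    "assets": {},
}

# POSTs to geodata/imagery/raster/stac and returns the created entry as JSON.
created = client.create_raster_stac(stac_item)
print(created)
```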
| {"golden_diff": "diff --git a/rgdc/rgdc/rgdc.py b/rgdc/rgdc/rgdc.py\n--- a/rgdc/rgdc/rgdc.py\n+++ b/rgdc/rgdc/rgdc.py\n@@ -240,6 +240,13 @@\n )\n return list(limit_offset_pager(self.session, 'geosearch', params=params))\n \n+ def create_raster_stac(self, raster: Dict) -> Dict:\n+ \"\"\"Create a raster entry using STAC format.\"\"\"\n+ r = self.session.post('geodata/imagery/raster/stac', json=raster)\n+ r.raise_for_status()\n+\n+ return r.json()\n+\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n", "issue": "expose STAC post endpoint in Python client\nfor https://github.com/ResonantGeoData/issue-dashboard/issues/11\n", "before_files": [{"content": "from base64 import b64encode\nfrom dataclasses import dataclass\nimport getpass\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nfrom .session import RgdcSession\nfrom .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE\nfrom .utils import (\n DEFAULT_RGD_API,\n download_checksum_file_to_path,\n limit_offset_pager,\n spatial_search_params,\n spatial_subentry_id,\n)\n\n\n@dataclass\nclass RasterDownload:\n path: Path\n images: List[Path]\n ancillary: List[Path]\n\n\nclass Rgdc:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n ):\n \"\"\"\n Initialize a RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. If None, a prompt will be provided.\n\n Returns:\n A new Rgdc instance.\n \"\"\"\n auth_header = None\n\n # Prompt for password if not provided\n if username is not None and password is None:\n password = getpass.getpass()\n\n if username and password:\n encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()\n auth_header = f'Basic {encoded_credentials}'\n\n self.session = RgdcSession(base_url=api_url, auth_header=auth_header)\n\n def list_image_tiles(self, image_id: Union[str, int]) -> Dict:\n \"\"\"List geodata imagery tiles.\"\"\"\n r = self.session.get(f'geoprocess/imagery/{image_id}/tiles')\n return r.json()\n\n def download_image_file(\n self, image_id: Union[str, int], chunk_size: int = 1024 * 1024\n ) -> Iterator[bytes]:\n \"\"\"\n Download the associated ImageFile data for this ImageEntry directly from S3.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).\n\n Returns:\n An iterator of byte chunks.\n \"\"\"\n r = self.session.get(f'geodata/imagery/{image_id}/data', stream=True)\n return r.iter_content(chunk_size=chunk_size)\n\n def download_image_thumbnail(\n self,\n image_id: Union[str, int],\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n r = self.session.get(f'geoprocess/imagery/{image_id}/thumbnail')\n return r.content\n\n def download_raster_thumbnail(\n self,\n raster_meta_id: Union[str, int, dict],\n band: int = 0,\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.\n band: The index of the image in the raster's image set to produce thumbnail from.\n\n Returns:\n Thumbnail 
bytes.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n images = parent_raster.get('image_set', {}).get('images', [])\n try:\n return self.download_image_thumbnail(images[band]['id'])\n except IndexError:\n raise IndexError(f'Band index ({band}) out of range.')\n\n def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:\n \"\"\"Get raster entry detail.\n\n Args:\n stac: Optionally return as STAC Item dictionary/JSON.\n\n Returns:\n Serialized object representation.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n if stac:\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}/stac')\n else:\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')\n return r.json()\n\n def download_raster(\n self,\n raster_meta_id: Union[str, int, dict],\n pathname: Optional[str] = None,\n nest_with_name: bool = False,\n keep_existing: bool = True,\n ) -> RasterDownload:\n \"\"\"\n Download the image set associated with a raster entry to disk.\n\n Args:\n raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.\n pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.\n nest_with_name: If True, nests the download within an additional directory, using the raster entry name.\n keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.\n\n Returns:\n A dictionary of the paths to all files downloaded under the directory.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n\n # Create dirs after request to avoid empty dirs if failed\n if pathname is None:\n pathname = tempfile.mkdtemp()\n\n # Handle optional nesting with raster entry name\n path = Path(pathname)\n parent_raster_name: Optional[str] = parent_raster.get('name')\n\n if nest_with_name and parent_raster_name:\n path = path / parent_raster_name\n\n # Ensure base download directory exists\n if not path.exists():\n path.mkdir()\n\n # Initialize dataclass\n raster_download = RasterDownload(path, [], [])\n\n # Download images\n images = parent_raster.get('image_set', {}).get('images', [])\n for image in tqdm(images, desc='Downloading image files'):\n file = image.get('image_file', {}).get('file', {})\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.images.append(file_path)\n\n # Download ancillary files\n ancillary = parent_raster.get('ancillary_files', [])\n for file in tqdm(ancillary, desc='Downloading ancillary files'):\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.ancillary.append(file_path)\n\n return raster_download\n\n def search(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for 
geospatial entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n return list(limit_offset_pager(self.session, 'geosearch', params=params))\n\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n num_bands: Optional[Tuple[int, int]] = None,\n resolution: Optional[Tuple[int, int]] = None,\n cloud_cover: Optional[Tuple[float, float]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for raster entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n num_bands: The min/max number of bands in the raster.\n resolution: The min/max resolution of the raster.\n cloud_cover: The min/max cloud coverage of the raster.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries in STAC Item format.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n if num_bands and len(num_bands) == 2:\n nbmin, nbmax = num_bands\n params['num_bands_min'] = nbmin\n params['num_bands_max'] = nbmax\n\n if resolution and len(resolution) == 2:\n rmin, rmax = resolution\n params['resolution_min'] = rmin\n params['resolution_max'] = rmax\n\n if cloud_cover and len(cloud_cover) == 2:\n ccmin, ccmax = cloud_cover\n params['cloud_cover_min'] = ccmin\n params['cloud_cover_max'] = ccmax\n\n return list(limit_offset_pager(self.session, 'geosearch/raster', params=params))\n", "path": "rgdc/rgdc/rgdc.py"}], "after_files": [{"content": "from base64 import b64encode\nfrom dataclasses import dataclass\nimport getpass\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nfrom .session import RgdcSession\nfrom .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE\nfrom .utils import (\n DEFAULT_RGD_API,\n download_checksum_file_to_path,\n limit_offset_pager,\n spatial_search_params,\n spatial_subentry_id,\n)\n\n\n@dataclass\nclass RasterDownload:\n path: Path\n images: List[Path]\n ancillary: List[Path]\n\n\nclass Rgdc:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n ):\n \"\"\"\n Initialize a RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. 
If None, a prompt will be provided.\n\n Returns:\n A new Rgdc instance.\n \"\"\"\n auth_header = None\n\n # Prompt for password if not provided\n if username is not None and password is None:\n password = getpass.getpass()\n\n if username and password:\n encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()\n auth_header = f'Basic {encoded_credentials}'\n\n self.session = RgdcSession(base_url=api_url, auth_header=auth_header)\n\n def list_image_tiles(self, image_id: Union[str, int]) -> Dict:\n \"\"\"List geodata imagery tiles.\"\"\"\n r = self.session.get(f'geoprocess/imagery/{image_id}/tiles')\n return r.json()\n\n def download_image_file(\n self, image_id: Union[str, int], chunk_size: int = 1024 * 1024\n ) -> Iterator[bytes]:\n \"\"\"\n Download the associated ImageFile data for this ImageEntry directly from S3.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).\n\n Returns:\n An iterator of byte chunks.\n \"\"\"\n r = self.session.get(f'geodata/imagery/{image_id}/data', stream=True)\n return r.iter_content(chunk_size=chunk_size)\n\n def download_image_thumbnail(\n self,\n image_id: Union[str, int],\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n r = self.session.get(f'geoprocess/imagery/{image_id}/thumbnail')\n return r.content\n\n def download_raster_thumbnail(\n self,\n raster_meta_id: Union[str, int, dict],\n band: int = 0,\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.\n band: The index of the image in the raster's image set to produce thumbnail from.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n images = parent_raster.get('image_set', {}).get('images', [])\n try:\n return self.download_image_thumbnail(images[band]['id'])\n except IndexError:\n raise IndexError(f'Band index ({band}) out of range.')\n\n def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:\n \"\"\"Get raster entry detail.\n\n Args:\n stac: Optionally return as STAC Item dictionary/JSON.\n\n Returns:\n Serialized object representation.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n if stac:\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}/stac')\n else:\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')\n return r.json()\n\n def download_raster(\n self,\n raster_meta_id: Union[str, int, dict],\n pathname: Optional[str] = None,\n nest_with_name: bool = False,\n keep_existing: bool = True,\n ) -> RasterDownload:\n \"\"\"\n Download the image set associated with a raster entry to disk.\n\n Args:\n raster_meta_id: The id of the RasterMetaEntry, which is a child to the desired raster entry, or search result.\n pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.\n nest_with_name: If True, nests the download within an additional directory, using the raster entry name.\n keep_existing: If False, replace files existing on disk. 
Only valid if `pathname` is given.\n\n Returns:\n A dictionary of the paths to all files downloaded under the directory.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'geodata/imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n\n # Create dirs after request to avoid empty dirs if failed\n if pathname is None:\n pathname = tempfile.mkdtemp()\n\n # Handle optional nesting with raster entry name\n path = Path(pathname)\n parent_raster_name: Optional[str] = parent_raster.get('name')\n\n if nest_with_name and parent_raster_name:\n path = path / parent_raster_name\n\n # Ensure base download directory exists\n if not path.exists():\n path.mkdir()\n\n # Initialize dataclass\n raster_download = RasterDownload(path, [], [])\n\n # Download images\n images = parent_raster.get('image_set', {}).get('images', [])\n for image in tqdm(images, desc='Downloading image files'):\n file = image.get('image_file', {}).get('file', {})\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.images.append(file_path)\n\n # Download ancillary files\n ancillary = parent_raster.get('ancillary_files', [])\n for file in tqdm(ancillary, desc='Downloading ancillary files'):\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.ancillary.append(file_path)\n\n return raster_download\n\n def search(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for geospatial entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n return list(limit_offset_pager(self.session, 'geosearch', params=params))\n\n def create_raster_stac(self, raster: Dict) -> Dict:\n \"\"\"Create a raster entry using STAC format.\"\"\"\n r = self.session.post('geodata/imagery/raster/stac', json=raster)\n r.raise_for_status()\n\n return r.json()\n\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n num_bands: Optional[Tuple[int, int]] = None,\n resolution: Optional[Tuple[int, int]] = None,\n cloud_cover: Optional[Tuple[float, float]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for raster entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n num_bands: The min/max number of bands in the raster.\n resolution: The min/max resolution of the raster.\n cloud_cover: The min/max cloud coverage of the raster.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries in STAC Item format.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n if num_bands and len(num_bands) == 2:\n nbmin, nbmax = num_bands\n params['num_bands_min'] = nbmin\n params['num_bands_max'] = nbmax\n\n if resolution and len(resolution) == 2:\n rmin, rmax = resolution\n params['resolution_min'] = rmin\n params['resolution_max'] = rmax\n\n if cloud_cover and len(cloud_cover) == 2:\n ccmin, ccmax = cloud_cover\n params['cloud_cover_min'] = ccmin\n params['cloud_cover_max'] = ccmax\n\n return list(limit_offset_pager(self.session, 'geosearch/raster', params=params))\n", "path": "rgdc/rgdc/rgdc.py"}]} | 3,762 | 179 |
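For orientation, the client methods serialized in the record above can be exercised roughly as follows. This is a hypothetical usage sketch, not taken from the source: the import path, API URL, credentials, query polygon, and the assumption that each returned STAC item exposes an `id` key are all illustrative.

```python
# Hypothetical usage sketch of the Rgdc client described above.
# Assumptions: import path inferred from the file layout, endpoint URL and
# credentials are placeholders, and result dicts carry an "id" field.
from rgdc.rgdc import Rgdc

client = Rgdc(api_url="https://rgd.example.com/api", username="demo")  # prompts for password

# A small illustrative bounding polygon as a GeoJSON dict.
query = {
    "type": "Polygon",
    "coordinates": [[
        [-105.0, 39.0], [-104.0, 39.0], [-104.0, 40.0],
        [-105.0, 40.0], [-105.0, 39.0],
    ]],
}

# Range tuples follow the documented convention: (2, None) means "2 or more".
items = client.search_raster_stac(
    query=query,
    num_bands=(2, None),
    cloud_cover=(None, 20.0),
    limit=5,
)
for item in items:
    print(item.get("id"))
```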
gh_patches_debug_20801 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-3343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Internal Server Error when searching with partial dates in Find a Record
## Description
<!-- A clear and concise description of what the bug is. -->
Sometimes, when partially entering a date in the Record Search field, the server randomly throws internal server errors.
Sometimes it throws the intended `InvalidDateFormatAPIException`, but other times it throws `UnboundLocalError`.
Here is a recording of the behaviour:
https://github.com/mathesar-foundation/mathesar/assets/31622972/6e7885c2-9d19-473a-86e0-8c63c5e68c0f
Also, it would be good UI/UX to show the user that the date is invalid. When the user has entered an incorrect date/time format and the server returns a 400 BAD REQUEST `InvalidDateFormatAPIException`, the user won't know, as the table still shows the loading state.
<img width="1700" alt="image" src="https://github.com/mathesar-foundation/mathesar/assets/31622972/78fac439-c326-47ae-8189-08dc61963d75">
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
- The server should not throw a 500 Internal Server Error; it should always throw `InvalidDateFormatAPIException` in case of a wrong date format.
- The user should be informed when their input date is incorrect.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
## Environment
- OS: macOS
- Browser: Chrome
- Other info:
## Additional context
<!-- Add any other context about the problem or screenshots here. -->
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/db/viewsets/records.py`
Content:
```
1 from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat
2 from rest_access_policy import AccessViewSetMixin
3 from rest_framework import status, viewsets
4 from rest_framework.exceptions import NotFound, MethodNotAllowed
5 from rest_framework.renderers import BrowsableAPIRenderer
6 from rest_framework.response import Response
7 from rest_framework.permissions import IsAuthenticatedOrReadOnly
8 from sqlalchemy.exc import IntegrityError, DataError
9
10 from mathesar.api.db.permissions.records import RecordAccessPolicy
11 from mathesar.api.exceptions.error_codes import ErrorCodes
12 import mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions
13 import mathesar.api.exceptions.generic_exceptions.base_exceptions as generic_api_exceptions
14 from db.functions.exceptions import (
15 BadDBFunctionFormat, ReferencedColumnsDontExist, UnknownDBFunctionID,
16 )
17 from db.records.exceptions import (
18 BadGroupFormat, GroupFieldNotFound, InvalidGroupType, UndefinedFunction,
19 BadSortFormat, SortFieldNotFound
20 )
21 from mathesar.api.pagination import TableLimitOffsetPagination
22 from mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer
23 from mathesar.api.utils import get_table_or_404
24 from mathesar.functions.operations.convert import rewrite_db_function_spec_column_ids_to_names
25 from mathesar.models.base import Table
26 from mathesar.utils.json import MathesarJSONRenderer
27
28
29 class RecordViewSet(AccessViewSetMixin, viewsets.ViewSet):
30 permission_classes = [IsAuthenticatedOrReadOnly]
31 access_policy = RecordAccessPolicy
32
33 # There is no 'update' method.
34 # We're not supporting PUT requests because there aren't a lot of use cases
35 # where the entire record needs to be replaced, PATCH suffices for updates.
36 def get_queryset(self):
37 return Table.objects.all().order_by('-created_at')
38
39 renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer]
40
41 # For filter parameter formatting, see:
42 # db/functions/operations/deserialize.py::get_db_function_from_ma_function_spec function doc>
43 # For sorting parameter formatting, see:
44 # https://github.com/centerofci/sqlalchemy-filters#sort-format
45 def list(self, request, table_pk=None):
46 paginator = TableLimitOffsetPagination()
47
48 serializer = RecordListParameterSerializer(data=request.GET)
49 serializer.is_valid(raise_exception=True)
50 table = get_table_or_404(table_pk)
51
52 filter_unprocessed = serializer.validated_data['filter']
53 order_by = serializer.validated_data['order_by']
54 grouping = serializer.validated_data['grouping']
55 search_fuzzy = serializer.validated_data['search_fuzzy']
56 filter_processed = None
57 column_names_to_ids = table.get_column_name_id_bidirectional_map()
58 column_ids_to_names = column_names_to_ids.inverse
59 if filter_unprocessed:
60 filter_processed = rewrite_db_function_spec_column_ids_to_names(
61 column_ids_to_names=column_ids_to_names,
62 spec=filter_unprocessed,
63 )
64 # Replace column id value used in the `field` property with column name
65 name_converted_group_by = None
66 if grouping:
67 group_by_columns_names = [column_ids_to_names[column_id] for column_id in grouping['columns']]
68 name_converted_group_by = {**grouping, 'columns': group_by_columns_names}
69 name_converted_order_by = [{**column, 'field': column_ids_to_names[column['field']]} for column in order_by]
70 name_converted_search = [{**column, 'column': column_ids_to_names[column['field']]} for column in search_fuzzy]
71
72 try:
73 records = paginator.paginate_queryset(
74 self.get_queryset(), request, table, column_names_to_ids,
75 filters=filter_processed,
76 order_by=name_converted_order_by,
77 grouping=name_converted_group_by,
78 search=name_converted_search,
79 duplicate_only=serializer.validated_data['duplicate_only']
80 )
81 except (BadDBFunctionFormat, UnknownDBFunctionID, ReferencedColumnsDontExist) as e:
82 raise database_api_exceptions.BadFilterAPIException(
83 e,
84 field='filters',
85 status_code=status.HTTP_400_BAD_REQUEST
86 )
87 except (BadSortFormat, SortFieldNotFound) as e:
88 raise database_api_exceptions.BadSortAPIException(
89 e,
90 field='order_by',
91 status_code=status.HTTP_400_BAD_REQUEST
92 )
93 except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:
94 raise database_api_exceptions.BadGroupAPIException(
95 e,
96 field='grouping',
97 status_code=status.HTTP_400_BAD_REQUEST
98 )
99 except UndefinedFunction as e:
100 raise database_api_exceptions.UndefinedFunctionAPIException(
101 e,
102 details=e.args[0],
103 status_code=status.HTTP_400_BAD_REQUEST
104 )
105 except DataError as e:
106 if isinstance(e.orig, InvalidDatetimeFormat):
107 raise database_api_exceptions.InvalidDateFormatAPIException(
108 e,
109 status_code=status.HTTP_400_BAD_REQUEST,
110 )
111
112 serializer = RecordSerializer(
113 records,
114 many=True,
115 context=self.get_serializer_context(table)
116 )
117 return paginator.get_paginated_response(serializer.data)
118
119 def retrieve(self, request, pk=None, table_pk=None):
120 table = get_table_or_404(table_pk)
121 # TODO refactor to use serializer for more DRY response logic
122 paginator = TableLimitOffsetPagination()
123 record_filters = {
124 "equal": [
125 {"column_name": [table.primary_key_column_name]},
126 {"literal": [pk]}
127 ]
128 }
129 column_names_to_ids = table.get_column_name_id_bidirectional_map()
130 records = paginator.paginate_queryset(
131 table,
132 request,
133 table,
134 column_names_to_ids,
135 filters=record_filters
136 )
137 if not records:
138 raise NotFound
139 serializer = RecordSerializer(
140 records,
141 many=True,
142 context=self.get_serializer_context(table)
143 )
144 return paginator.get_paginated_response(serializer.data)
145
146 def create(self, request, table_pk=None):
147 table = get_table_or_404(table_pk)
148 primary_key_column_name = None
149 try:
150 primary_key_column_name = table.primary_key_column_name
151 except AssertionError:
152 raise generic_api_exceptions.MethodNotAllowedAPIException(
153 MethodNotAllowed,
154 error_code=ErrorCodes.MethodNotAllowed.value,
155 message="You cannot insert into tables without a primary key"
156 )
157 serializer = RecordSerializer(data=request.data, context=self.get_serializer_context(table))
158 serializer.is_valid(raise_exception=True)
159 serializer.save()
160 # TODO refactor to use serializer for more DRY response logic
161 column_name_id_map = table.get_column_name_id_bidirectional_map()
162 table_pk_column_id = column_name_id_map[primary_key_column_name]
163 pk_value = serializer.data[table_pk_column_id]
164 paginator = TableLimitOffsetPagination()
165 record_filters = {
166 "equal": [
167 {"column_name": [primary_key_column_name]},
168 {"literal": [pk_value]}
169 ]
170 }
171 column_names_to_ids = table.get_column_name_id_bidirectional_map()
172 records = paginator.paginate_queryset(
173 table,
174 request,
175 table,
176 column_names_to_ids,
177 filters=record_filters
178 )
179 serializer = RecordSerializer(
180 records,
181 many=True,
182 context=self.get_serializer_context(table)
183 )
184 response = paginator.get_paginated_response(serializer.data)
185 response.status_code = status.HTTP_201_CREATED
186 return response
187
188 def partial_update(self, request, pk=None, table_pk=None):
189 table = get_table_or_404(table_pk)
190 serializer = RecordSerializer(
191 {'id': pk},
192 data=request.data,
193 context=self.get_serializer_context(table),
194 partial=True
195 )
196 serializer.is_valid(raise_exception=True)
197 serializer.save()
198 # TODO refactor to use serializer for more DRY response logic
199 paginator = TableLimitOffsetPagination()
200 record_filters = {
201 "equal": [
202 {"column_name": [table.primary_key_column_name]},
203 {"literal": [pk]}
204 ]
205 }
206 column_names_to_ids = table.get_column_name_id_bidirectional_map()
207 records = paginator.paginate_queryset(
208 table,
209 request,
210 table,
211 column_names_to_ids,
212 filters=record_filters
213 )
214 serializer = RecordSerializer(
215 records,
216 many=True,
217 context=self.get_serializer_context(table)
218 )
219 return paginator.get_paginated_response(serializer.data)
220
221 def destroy(self, request, pk=None, table_pk=None):
222 table = get_table_or_404(table_pk)
223 if table.get_record(pk) is None:
224 raise generic_api_exceptions.NotFoundAPIException(
225 NotFound,
226 error_code=ErrorCodes.RecordNotFound.value,
227 message="Record doesn't exist"
228 )
229 try:
230 table.delete_record(pk)
231 except IntegrityError as e:
232 if isinstance(e.orig, ForeignKeyViolation):
233 raise database_api_exceptions.ForeignKeyViolationAPIException(
234 e,
235 status_code=status.HTTP_400_BAD_REQUEST,
236 referent_table=table,
237 )
238
239 return Response(status=status.HTTP_204_NO_CONTENT)
240
241 def get_serializer_context(self, table):
242 columns_map = table.get_column_name_id_bidirectional_map()
243 context = {'columns_map': columns_map, 'table': table}
244 return context
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py
--- a/mathesar/api/db/viewsets/records.py
+++ b/mathesar/api/db/viewsets/records.py
@@ -1,4 +1,4 @@
-from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat
+from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat, DatetimeFieldOverflow
from rest_access_policy import AccessViewSetMixin
from rest_framework import status, viewsets
from rest_framework.exceptions import NotFound, MethodNotAllowed
@@ -108,6 +108,16 @@
e,
status_code=status.HTTP_400_BAD_REQUEST,
)
+ elif isinstance(e.orig, DatetimeFieldOverflow):
+ raise database_api_exceptions.InvalidDateAPIException(
+ e,
+ status_code=status.HTTP_400_BAD_REQUEST,
+ )
+ else:
+ raise database_api_exceptions.MathesarAPIException(
+ e,
+ status_code=status.HTTP_400_BAD_REQUEST
+ )
serializer = RecordSerializer(
records,
| {"golden_diff": "diff --git a/mathesar/api/db/viewsets/records.py b/mathesar/api/db/viewsets/records.py\n--- a/mathesar/api/db/viewsets/records.py\n+++ b/mathesar/api/db/viewsets/records.py\n@@ -1,4 +1,4 @@\n-from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat\n+from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat, DatetimeFieldOverflow\n from rest_access_policy import AccessViewSetMixin\n from rest_framework import status, viewsets\n from rest_framework.exceptions import NotFound, MethodNotAllowed\n@@ -108,6 +108,16 @@\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n+ elif isinstance(e.orig, DatetimeFieldOverflow):\n+ raise database_api_exceptions.InvalidDateAPIException(\n+ e,\n+ status_code=status.HTTP_400_BAD_REQUEST,\n+ )\n+ else:\n+ raise database_api_exceptions.MathesarAPIException(\n+ e,\n+ status_code=status.HTTP_400_BAD_REQUEST\n+ )\n \n serializer = RecordSerializer(\n records,\n", "issue": "Internal Server Error when searching with partial dates in Find a Record\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nSometimes when partially entering the date in the Record Search Field, it randomly throws internal server errors.\r\nSometimes it throws the intended `InvalidDateFormatAPIException` but other times it `UnboundLocalError`.\r\n\r\nHere is a recording of the behaviour:\r\n\r\nhttps://github.com/mathesar-foundation/mathesar/assets/31622972/6e7885c2-9d19-473a-86e0-8c63c5e68c0f\r\n\r\n\r\nAlso, it would be good UI/UX to show the user the date is invalid. In case when the user has just put an incorrect date time format and the server returns a 400 BAD REQUEST InvalidDateFormatAPIException. The user won't know as the table still shows the loading state.\r\n<img width=\"1700\" alt=\"image\" src=\"https://github.com/mathesar-foundation/mathesar/assets/31622972/78fac439-c326-47ae-8189-08dc61963d75\">\r\n\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n- Server should not throw 500 internal server error and always throw InvalidDateFormatAPIException in case of wrong date format.\r\n- User should be conveyed if their input date is incorrect.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\n## Environment\r\n - OS: macOS\r\n - Browser: Chrome\r\n - Other info:\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. 
-->\r\n\n", "before_files": [{"content": "from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound, MethodNotAllowed\nfrom rest_framework.renderers import BrowsableAPIRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom sqlalchemy.exc import IntegrityError, DataError\n\nfrom mathesar.api.db.permissions.records import RecordAccessPolicy\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nimport mathesar.api.exceptions.generic_exceptions.base_exceptions as generic_api_exceptions\nfrom db.functions.exceptions import (\n BadDBFunctionFormat, ReferencedColumnsDontExist, UnknownDBFunctionID,\n)\nfrom db.records.exceptions import (\n BadGroupFormat, GroupFieldNotFound, InvalidGroupType, UndefinedFunction,\n BadSortFormat, SortFieldNotFound\n)\nfrom mathesar.api.pagination import TableLimitOffsetPagination\nfrom mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.functions.operations.convert import rewrite_db_function_spec_column_ids_to_names\nfrom mathesar.models.base import Table\nfrom mathesar.utils.json import MathesarJSONRenderer\n\n\nclass RecordViewSet(AccessViewSetMixin, viewsets.ViewSet):\n permission_classes = [IsAuthenticatedOrReadOnly]\n access_policy = RecordAccessPolicy\n\n # There is no 'update' method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n renderer_classes = [MathesarJSONRenderer, BrowsableAPIRenderer]\n\n # For filter parameter formatting, see:\n # db/functions/operations/deserialize.py::get_db_function_from_ma_function_spec function doc>\n # For sorting parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#sort-format\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetPagination()\n\n serializer = RecordListParameterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n table = get_table_or_404(table_pk)\n\n filter_unprocessed = serializer.validated_data['filter']\n order_by = serializer.validated_data['order_by']\n grouping = serializer.validated_data['grouping']\n search_fuzzy = serializer.validated_data['search_fuzzy']\n filter_processed = None\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n column_ids_to_names = column_names_to_ids.inverse\n if filter_unprocessed:\n filter_processed = rewrite_db_function_spec_column_ids_to_names(\n column_ids_to_names=column_ids_to_names,\n spec=filter_unprocessed,\n )\n # Replace column id value used in the `field` property with column name\n name_converted_group_by = None\n if grouping:\n group_by_columns_names = [column_ids_to_names[column_id] for column_id in grouping['columns']]\n name_converted_group_by = {**grouping, 'columns': group_by_columns_names}\n name_converted_order_by = [{**column, 'field': column_ids_to_names[column['field']]} for column in order_by]\n name_converted_search = [{**column, 'column': column_ids_to_names[column['field']]} for column in search_fuzzy]\n\n try:\n records = paginator.paginate_queryset(\n 
self.get_queryset(), request, table, column_names_to_ids,\n filters=filter_processed,\n order_by=name_converted_order_by,\n grouping=name_converted_group_by,\n search=name_converted_search,\n duplicate_only=serializer.validated_data['duplicate_only']\n )\n except (BadDBFunctionFormat, UnknownDBFunctionID, ReferencedColumnsDontExist) as e:\n raise database_api_exceptions.BadFilterAPIException(\n e,\n field='filters',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadSortFormat, SortFieldNotFound) as e:\n raise database_api_exceptions.BadSortAPIException(\n e,\n field='order_by',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:\n raise database_api_exceptions.BadGroupAPIException(\n e,\n field='grouping',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UndefinedFunction as e:\n raise database_api_exceptions.UndefinedFunctionAPIException(\n e,\n details=e.args[0],\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except DataError as e:\n if isinstance(e.orig, InvalidDatetimeFormat):\n raise database_api_exceptions.InvalidDateFormatAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n # TODO refactor to use serializer for more DRY response logic\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [table.primary_key_column_name]},\n {\"literal\": [pk]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n if not records:\n raise NotFound\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n primary_key_column_name = None\n try:\n primary_key_column_name = table.primary_key_column_name\n except AssertionError:\n raise generic_api_exceptions.MethodNotAllowedAPIException(\n MethodNotAllowed,\n error_code=ErrorCodes.MethodNotAllowed.value,\n message=\"You cannot insert into tables without a primary key\"\n )\n serializer = RecordSerializer(data=request.data, context=self.get_serializer_context(table))\n serializer.is_valid(raise_exception=True)\n serializer.save()\n # TODO refactor to use serializer for more DRY response logic\n column_name_id_map = table.get_column_name_id_bidirectional_map()\n table_pk_column_id = column_name_id_map[primary_key_column_name]\n pk_value = serializer.data[table_pk_column_id]\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [primary_key_column_name]},\n {\"literal\": [pk_value]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n response = paginator.get_paginated_response(serializer.data)\n response.status_code = status.HTTP_201_CREATED\n return response\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = 
get_table_or_404(table_pk)\n serializer = RecordSerializer(\n {'id': pk},\n data=request.data,\n context=self.get_serializer_context(table),\n partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n # TODO refactor to use serializer for more DRY response logic\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [table.primary_key_column_name]},\n {\"literal\": [pk]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n if table.get_record(pk) is None:\n raise generic_api_exceptions.NotFoundAPIException(\n NotFound,\n error_code=ErrorCodes.RecordNotFound.value,\n message=\"Record doesn't exist\"\n )\n try:\n table.delete_record(pk)\n except IntegrityError as e:\n if isinstance(e.orig, ForeignKeyViolation):\n raise database_api_exceptions.ForeignKeyViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n referent_table=table,\n )\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def get_serializer_context(self, table):\n columns_map = table.get_column_name_id_bidirectional_map()\n context = {'columns_map': columns_map, 'table': table}\n return context\n", "path": "mathesar/api/db/viewsets/records.py"}], "after_files": [{"content": "from psycopg2.errors import ForeignKeyViolation, InvalidDatetimeFormat, DatetimeFieldOverflow\nfrom rest_access_policy import AccessViewSetMixin\nfrom rest_framework import status, viewsets\nfrom rest_framework.exceptions import NotFound, MethodNotAllowed\nfrom rest_framework.renderers import BrowsableAPIRenderer\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticatedOrReadOnly\nfrom sqlalchemy.exc import IntegrityError, DataError\n\nfrom mathesar.api.db.permissions.records import RecordAccessPolicy\nfrom mathesar.api.exceptions.error_codes import ErrorCodes\nimport mathesar.api.exceptions.database_exceptions.exceptions as database_api_exceptions\nimport mathesar.api.exceptions.generic_exceptions.base_exceptions as generic_api_exceptions\nfrom db.functions.exceptions import (\n BadDBFunctionFormat, ReferencedColumnsDontExist, UnknownDBFunctionID,\n)\nfrom db.records.exceptions import (\n BadGroupFormat, GroupFieldNotFound, InvalidGroupType, UndefinedFunction,\n BadSortFormat, SortFieldNotFound\n)\nfrom mathesar.api.pagination import TableLimitOffsetPagination\nfrom mathesar.api.serializers.records import RecordListParameterSerializer, RecordSerializer\nfrom mathesar.api.utils import get_table_or_404\nfrom mathesar.functions.operations.convert import rewrite_db_function_spec_column_ids_to_names\nfrom mathesar.models.base import Table\nfrom mathesar.utils.json import MathesarJSONRenderer\n\n\nclass RecordViewSet(AccessViewSetMixin, viewsets.ViewSet):\n permission_classes = [IsAuthenticatedOrReadOnly]\n access_policy = RecordAccessPolicy\n\n # There is no 'update' method.\n # We're not supporting PUT requests because there aren't a lot of use cases\n # where the entire record needs to be replaced, PATCH suffices for updates.\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n renderer_classes = 
[MathesarJSONRenderer, BrowsableAPIRenderer]\n\n # For filter parameter formatting, see:\n # db/functions/operations/deserialize.py::get_db_function_from_ma_function_spec function doc>\n # For sorting parameter formatting, see:\n # https://github.com/centerofci/sqlalchemy-filters#sort-format\n def list(self, request, table_pk=None):\n paginator = TableLimitOffsetPagination()\n\n serializer = RecordListParameterSerializer(data=request.GET)\n serializer.is_valid(raise_exception=True)\n table = get_table_or_404(table_pk)\n\n filter_unprocessed = serializer.validated_data['filter']\n order_by = serializer.validated_data['order_by']\n grouping = serializer.validated_data['grouping']\n search_fuzzy = serializer.validated_data['search_fuzzy']\n filter_processed = None\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n column_ids_to_names = column_names_to_ids.inverse\n if filter_unprocessed:\n filter_processed = rewrite_db_function_spec_column_ids_to_names(\n column_ids_to_names=column_ids_to_names,\n spec=filter_unprocessed,\n )\n # Replace column id value used in the `field` property with column name\n name_converted_group_by = None\n if grouping:\n group_by_columns_names = [column_ids_to_names[column_id] for column_id in grouping['columns']]\n name_converted_group_by = {**grouping, 'columns': group_by_columns_names}\n name_converted_order_by = [{**column, 'field': column_ids_to_names[column['field']]} for column in order_by]\n name_converted_search = [{**column, 'column': column_ids_to_names[column['field']]} for column in search_fuzzy]\n\n try:\n records = paginator.paginate_queryset(\n self.get_queryset(), request, table, column_names_to_ids,\n filters=filter_processed,\n order_by=name_converted_order_by,\n grouping=name_converted_group_by,\n search=name_converted_search,\n duplicate_only=serializer.validated_data['duplicate_only']\n )\n except (BadDBFunctionFormat, UnknownDBFunctionID, ReferencedColumnsDontExist) as e:\n raise database_api_exceptions.BadFilterAPIException(\n e,\n field='filters',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadSortFormat, SortFieldNotFound) as e:\n raise database_api_exceptions.BadSortAPIException(\n e,\n field='order_by',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except (BadGroupFormat, GroupFieldNotFound, InvalidGroupType) as e:\n raise database_api_exceptions.BadGroupAPIException(\n e,\n field='grouping',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except UndefinedFunction as e:\n raise database_api_exceptions.UndefinedFunctionAPIException(\n e,\n details=e.args[0],\n status_code=status.HTTP_400_BAD_REQUEST\n )\n except DataError as e:\n if isinstance(e.orig, InvalidDatetimeFormat):\n raise database_api_exceptions.InvalidDateFormatAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n elif isinstance(e.orig, DatetimeFieldOverflow):\n raise database_api_exceptions.InvalidDateAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n )\n else:\n raise database_api_exceptions.MathesarAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST\n )\n\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def retrieve(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n # TODO refactor to use serializer for more DRY response logic\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [table.primary_key_column_name]},\n 
{\"literal\": [pk]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n if not records:\n raise NotFound\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def create(self, request, table_pk=None):\n table = get_table_or_404(table_pk)\n primary_key_column_name = None\n try:\n primary_key_column_name = table.primary_key_column_name\n except AssertionError:\n raise generic_api_exceptions.MethodNotAllowedAPIException(\n MethodNotAllowed,\n error_code=ErrorCodes.MethodNotAllowed.value,\n message=\"You cannot insert into tables without a primary key\"\n )\n serializer = RecordSerializer(data=request.data, context=self.get_serializer_context(table))\n serializer.is_valid(raise_exception=True)\n serializer.save()\n # TODO refactor to use serializer for more DRY response logic\n column_name_id_map = table.get_column_name_id_bidirectional_map()\n table_pk_column_id = column_name_id_map[primary_key_column_name]\n pk_value = serializer.data[table_pk_column_id]\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [primary_key_column_name]},\n {\"literal\": [pk_value]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n response = paginator.get_paginated_response(serializer.data)\n response.status_code = status.HTTP_201_CREATED\n return response\n\n def partial_update(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n serializer = RecordSerializer(\n {'id': pk},\n data=request.data,\n context=self.get_serializer_context(table),\n partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n # TODO refactor to use serializer for more DRY response logic\n paginator = TableLimitOffsetPagination()\n record_filters = {\n \"equal\": [\n {\"column_name\": [table.primary_key_column_name]},\n {\"literal\": [pk]}\n ]\n }\n column_names_to_ids = table.get_column_name_id_bidirectional_map()\n records = paginator.paginate_queryset(\n table,\n request,\n table,\n column_names_to_ids,\n filters=record_filters\n )\n serializer = RecordSerializer(\n records,\n many=True,\n context=self.get_serializer_context(table)\n )\n return paginator.get_paginated_response(serializer.data)\n\n def destroy(self, request, pk=None, table_pk=None):\n table = get_table_or_404(table_pk)\n if table.get_record(pk) is None:\n raise generic_api_exceptions.NotFoundAPIException(\n NotFound,\n error_code=ErrorCodes.RecordNotFound.value,\n message=\"Record doesn't exist\"\n )\n try:\n table.delete_record(pk)\n except IntegrityError as e:\n if isinstance(e.orig, ForeignKeyViolation):\n raise database_api_exceptions.ForeignKeyViolationAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n referent_table=table,\n )\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def get_serializer_context(self, table):\n columns_map = table.get_column_name_id_bidirectional_map()\n context = {'columns_map': columns_map, 'table': table}\n return context\n", "path": "mathesar/api/db/viewsets/records.py"}]} | 3,247 | 236 |
gh_patches_debug_54112 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1639 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some translation strings missing/not applied
**Describe the bug**
Some translations are not being applied.
**To Reproduce**
Change the Bookwyrm language to one other than English (tested with Lithuanian):
https://ziurkes.group.lt/user/athinkingmeat/books/read
https://ziurkes.group.lt/user/athinkingmeat/books/reading
https://ziurkes.group.lt/user/athinkingmeat/books/to-read
**Expected behavior**
All these links should have "read", "currently reading" and "to read" strings translated, but they are shown in English
**Screenshots**



**Instance**
https://ziurkes.group.lt/
**Additional context**
This is probably a problem with other languages as well.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/utils/isni.py`
Content:
```
1 """ISNI author checking utilities"""
2 import xml.etree.ElementTree as ET
3 import requests
4
5 from bookwyrm import activitypub, models
6
7
8 def request_isni_data(search_index, search_term, max_records=5):
9 """Request data from the ISNI API"""
10
11 search_string = f'{search_index}="{search_term}"'
12 query_params = {
13 "query": search_string,
14 "version": "1.1",
15 "operation": "searchRetrieve",
16 "recordSchema": "isni-b",
17 "maximumRecords": max_records,
18 "startRecord": "1",
19 "recordPacking": "xml",
20 "sortKeys": "RLV,pica,0,,",
21 }
22 result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=10)
23 # the OCLC ISNI server asserts the payload is encoded
24 # in latin1, but we know better
25 result.encoding = "utf-8"
26 return result.text
27
28
29 def make_name_string(element):
30 """create a string of form 'personal_name surname'"""
31
32 # NOTE: this will often be incorrect, many naming systems
33 # list "surname" before personal name
34 forename = element.find(".//forename")
35 surname = element.find(".//surname")
36 if forename is not None:
37 return "".join([forename.text, " ", surname.text])
38 return surname.text
39
40
41 def get_other_identifier(element, code):
42 """Get other identifiers associated with an author from their ISNI record"""
43
44 identifiers = element.findall(".//otherIdentifierOfIdentity")
45 for section_head in identifiers:
46 if (
47 section_head.find(".//type") is not None
48 and section_head.find(".//type").text == code
49 and section_head.find(".//identifier") is not None
50 ):
51 return section_head.find(".//identifier").text
52
53 # if we can't find it in otherIdentifierOfIdentity,
54 # try sources
55 for source in element.findall(".//sources"):
56 code_of_source = source.find(".//codeOfSource")
57 if code_of_source is not None and code_of_source.text.lower() == code.lower():
58 return source.find(".//sourceIdentifier").text
59
60 return ""
61
62
63 def get_external_information_uri(element, match_string):
64 """Get URLs associated with an author from their ISNI record"""
65
66 sources = element.findall(".//externalInformation")
67 for source in sources:
68 information = source.find(".//information")
69 uri = source.find(".//URI")
70 if (
71 uri is not None
72 and information is not None
73 and information.text.lower() == match_string.lower()
74 ):
75 return uri.text
76 return ""
77
78
79 def find_authors_by_name(name_string, description=False):
80 """Query the ISNI database for possible author matches by name"""
81
82 payload = request_isni_data("pica.na", name_string)
83 # parse xml
84 root = ET.fromstring(payload)
85 # build list of possible authors
86 possible_authors = []
87 for element in root.iter("responseRecord"):
88 personal_name = element.find(".//forename/..")
89 if not personal_name:
90 continue
91
92 author = get_author_from_isni(element.find(".//isniUnformatted").text)
93
94 if bool(description):
95
96 titles = []
97 # prefer title records from LoC+ coop, Australia, Ireland, or Singapore
98 # in that order
99 for source in ["LCNACO", "NLA", "N6I", "NLB"]:
100 for parent in element.findall(f'.//titleOfWork/[@source="{source}"]'):
101 titles.append(parent.find(".//title"))
102 for parent in element.findall(f'.//titleOfWork[@subsource="{source}"]'):
103 titles.append(parent.find(".//title"))
104 # otherwise just grab the first title listing
105 titles.append(element.find(".//title"))
106
107 if titles is not None:
108 # some of the "titles" in ISNI are a little ...iffy
109 # '@' is used by ISNI/OCLC to index the starting point ignoring stop words
110 # (e.g. "The @Government of no one")
111 title_elements = [
112 e for e in titles if not e.text.replace("@", "").isnumeric()
113 ]
114 if len(title_elements):
115 author.bio = title_elements[0].text.replace("@", "")
116 else:
117 author.bio = None
118
119 possible_authors.append(author)
120
121 return possible_authors
122
123
124 def get_author_from_isni(isni):
125 """Find data to populate a new author record from their ISNI"""
126
127 payload = request_isni_data("pica.isn", isni)
128 # parse xml
129 root = ET.fromstring(payload)
130 # there should only be a single responseRecord
131 # but let's use the first one just in case
132 element = root.find(".//responseRecord")
133 name = make_name_string(element.find(".//forename/.."))
134 viaf = get_other_identifier(element, "viaf")
135 # use a set to dedupe aliases in ISNI
136 aliases = set()
137 aliases_element = element.findall(".//personalNameVariant")
138 for entry in aliases_element:
139 aliases.add(make_name_string(entry))
140 # aliases needs to be list not set
141 aliases = list(aliases)
142 bio = element.find(".//nameTitle")
143 bio = bio.text if bio is not None else ""
144 wikipedia = get_external_information_uri(element, "Wikipedia")
145
146 author = activitypub.Author(
147 id=element.find(".//isniURI").text,
148 name=name,
149 isni=isni,
150 viafId=viaf,
151 aliases=aliases,
152 bio=bio,
153 wikipediaLink=wikipedia,
154 )
155
156 return author
157
158
159 def build_author_from_isni(match_value):
160 """Build basic author class object from ISNI URL"""
161
162 # if it is an isni value get the data
163 if match_value.startswith("https://isni.org/isni/"):
164 isni = match_value.replace("https://isni.org/isni/", "")
165 return {"author": get_author_from_isni(isni)}
166 # otherwise it's a name string
167 return {}
168
169
170 def augment_author_metadata(author, isni):
171 """Update any missing author fields from ISNI data"""
172
173 isni_author = get_author_from_isni(isni)
174 isni_author.to_model(model=models.Author, instance=author, overwrite=False)
175
176 # we DO want to overwrite aliases because we're adding them to the
177 # existing aliases and ISNI will usually have more.
178 # We need to dedupe because ISNI records often have lots of dupe aliases
179 aliases = set(isni_author.aliases)
180 for alias in author.aliases:
181 aliases.add(alias)
182 author.aliases = list(aliases)
183 author.save()
184
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/utils/isni.py b/bookwyrm/utils/isni.py
--- a/bookwyrm/utils/isni.py
+++ b/bookwyrm/utils/isni.py
@@ -19,7 +19,7 @@
"recordPacking": "xml",
"sortKeys": "RLV,pica,0,,",
}
- result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=10)
+ result = requests.get("http://isni.oclc.org/sru/", params=query_params, timeout=15)
# the OCLC ISNI server asserts the payload is encoded
# in latin1, but we know better
result.encoding = "utf-8"
| {"golden_diff": "diff --git a/bookwyrm/utils/isni.py b/bookwyrm/utils/isni.py\n--- a/bookwyrm/utils/isni.py\n+++ b/bookwyrm/utils/isni.py\n@@ -19,7 +19,7 @@\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n- result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=10)\n+ result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=15)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n", "issue": "Some translation strings missing/not applied\n**Describe the bug**\r\nSome translations are not being applied.\r\n\r\n**To Reproduce**\r\n\r\nchange Bookwyrm language to other than English (tested with Lithuanian)\r\n\r\nhttps://ziurkes.group.lt/user/athinkingmeat/books/read\r\nhttps://ziurkes.group.lt/user/athinkingmeat/books/reading\r\nhttps://ziurkes.group.lt/user/athinkingmeat/books/to-read\r\n\r\n**Expected behavior**\r\nAll these links should have \"read\", \"currently reading\" and \"to read\" strings translated, but they are shown in English\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n**Instance**\r\n\r\nhttps://ziurkes.group.lt/\r\n\r\n**Additional context**\r\nProbably is a problem with other languages as well\r\n\n", "before_files": [{"content": "\"\"\"ISNI author checking utilities\"\"\"\nimport xml.etree.ElementTree as ET\nimport requests\n\nfrom bookwyrm import activitypub, models\n\n\ndef request_isni_data(search_index, search_term, max_records=5):\n \"\"\"Request data from the ISNI API\"\"\"\n\n search_string = f'{search_index}=\"{search_term}\"'\n query_params = {\n \"query\": search_string,\n \"version\": \"1.1\",\n \"operation\": \"searchRetrieve\",\n \"recordSchema\": \"isni-b\",\n \"maximumRecords\": max_records,\n \"startRecord\": \"1\",\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=10)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n return result.text\n\n\ndef make_name_string(element):\n \"\"\"create a string of form 'personal_name surname'\"\"\"\n\n # NOTE: this will often be incorrect, many naming systems\n # list \"surname\" before personal name\n forename = element.find(\".//forename\")\n surname = element.find(\".//surname\")\n if forename is not None:\n return \"\".join([forename.text, \" \", surname.text])\n return surname.text\n\n\ndef get_other_identifier(element, code):\n \"\"\"Get other identifiers associated with an author from their ISNI record\"\"\"\n\n identifiers = element.findall(\".//otherIdentifierOfIdentity\")\n for section_head in identifiers:\n if (\n section_head.find(\".//type\") is not None\n and section_head.find(\".//type\").text == code\n and section_head.find(\".//identifier\") is not None\n ):\n return section_head.find(\".//identifier\").text\n\n # if we can't find it in otherIdentifierOfIdentity,\n # try sources\n for source in element.findall(\".//sources\"):\n code_of_source = source.find(\".//codeOfSource\")\n if code_of_source is not None and code_of_source.text.lower() == code.lower():\n return source.find(\".//sourceIdentifier\").text\n\n return \"\"\n\n\ndef get_external_information_uri(element, match_string):\n \"\"\"Get URLs associated with an author from their ISNI record\"\"\"\n\n sources = element.findall(\".//externalInformation\")\n for source in sources:\n information = 
source.find(\".//information\")\n uri = source.find(\".//URI\")\n if (\n uri is not None\n and information is not None\n and information.text.lower() == match_string.lower()\n ):\n return uri.text\n return \"\"\n\n\ndef find_authors_by_name(name_string, description=False):\n \"\"\"Query the ISNI database for possible author matches by name\"\"\"\n\n payload = request_isni_data(\"pica.na\", name_string)\n # parse xml\n root = ET.fromstring(payload)\n # build list of possible authors\n possible_authors = []\n for element in root.iter(\"responseRecord\"):\n personal_name = element.find(\".//forename/..\")\n if not personal_name:\n continue\n\n author = get_author_from_isni(element.find(\".//isniUnformatted\").text)\n\n if bool(description):\n\n titles = []\n # prefer title records from LoC+ coop, Australia, Ireland, or Singapore\n # in that order\n for source in [\"LCNACO\", \"NLA\", \"N6I\", \"NLB\"]:\n for parent in element.findall(f'.//titleOfWork/[@source=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n for parent in element.findall(f'.//titleOfWork[@subsource=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n # otherwise just grab the first title listing\n titles.append(element.find(\".//title\"))\n\n if titles is not None:\n # some of the \"titles\" in ISNI are a little ...iffy\n # '@' is used by ISNI/OCLC to index the starting point ignoring stop words\n # (e.g. \"The @Government of no one\")\n title_elements = [\n e for e in titles if not e.text.replace(\"@\", \"\").isnumeric()\n ]\n if len(title_elements):\n author.bio = title_elements[0].text.replace(\"@\", \"\")\n else:\n author.bio = None\n\n possible_authors.append(author)\n\n return possible_authors\n\n\ndef get_author_from_isni(isni):\n \"\"\"Find data to populate a new author record from their ISNI\"\"\"\n\n payload = request_isni_data(\"pica.isn\", isni)\n # parse xml\n root = ET.fromstring(payload)\n # there should only be a single responseRecord\n # but let's use the first one just in case\n element = root.find(\".//responseRecord\")\n name = make_name_string(element.find(\".//forename/..\"))\n viaf = get_other_identifier(element, \"viaf\")\n # use a set to dedupe aliases in ISNI\n aliases = set()\n aliases_element = element.findall(\".//personalNameVariant\")\n for entry in aliases_element:\n aliases.add(make_name_string(entry))\n # aliases needs to be list not set\n aliases = list(aliases)\n bio = element.find(\".//nameTitle\")\n bio = bio.text if bio is not None else \"\"\n wikipedia = get_external_information_uri(element, \"Wikipedia\")\n\n author = activitypub.Author(\n id=element.find(\".//isniURI\").text,\n name=name,\n isni=isni,\n viafId=viaf,\n aliases=aliases,\n bio=bio,\n wikipediaLink=wikipedia,\n )\n\n return author\n\n\ndef build_author_from_isni(match_value):\n \"\"\"Build basic author class object from ISNI URL\"\"\"\n\n # if it is an isni value get the data\n if match_value.startswith(\"https://isni.org/isni/\"):\n isni = match_value.replace(\"https://isni.org/isni/\", \"\")\n return {\"author\": get_author_from_isni(isni)}\n # otherwise it's a name string\n return {}\n\n\ndef augment_author_metadata(author, isni):\n \"\"\"Update any missing author fields from ISNI data\"\"\"\n\n isni_author = get_author_from_isni(isni)\n isni_author.to_model(model=models.Author, instance=author, overwrite=False)\n\n # we DO want to overwrite aliases because we're adding them to the\n # existing aliases and ISNI will usually have more.\n # We need to dedupe because ISNI records often have lots of 
dupe aliases\n aliases = set(isni_author.aliases)\n for alias in author.aliases:\n aliases.add(alias)\n author.aliases = list(aliases)\n author.save()\n", "path": "bookwyrm/utils/isni.py"}], "after_files": [{"content": "\"\"\"ISNI author checking utilities\"\"\"\nimport xml.etree.ElementTree as ET\nimport requests\n\nfrom bookwyrm import activitypub, models\n\n\ndef request_isni_data(search_index, search_term, max_records=5):\n \"\"\"Request data from the ISNI API\"\"\"\n\n search_string = f'{search_index}=\"{search_term}\"'\n query_params = {\n \"query\": search_string,\n \"version\": \"1.1\",\n \"operation\": \"searchRetrieve\",\n \"recordSchema\": \"isni-b\",\n \"maximumRecords\": max_records,\n \"startRecord\": \"1\",\n \"recordPacking\": \"xml\",\n \"sortKeys\": \"RLV,pica,0,,\",\n }\n result = requests.get(\"http://isni.oclc.org/sru/\", params=query_params, timeout=15)\n # the OCLC ISNI server asserts the payload is encoded\n # in latin1, but we know better\n result.encoding = \"utf-8\"\n return result.text\n\n\ndef make_name_string(element):\n \"\"\"create a string of form 'personal_name surname'\"\"\"\n\n # NOTE: this will often be incorrect, many naming systems\n # list \"surname\" before personal name\n forename = element.find(\".//forename\")\n surname = element.find(\".//surname\")\n if forename is not None:\n return \"\".join([forename.text, \" \", surname.text])\n return surname.text\n\n\ndef get_other_identifier(element, code):\n \"\"\"Get other identifiers associated with an author from their ISNI record\"\"\"\n\n identifiers = element.findall(\".//otherIdentifierOfIdentity\")\n for section_head in identifiers:\n if (\n section_head.find(\".//type\") is not None\n and section_head.find(\".//type\").text == code\n and section_head.find(\".//identifier\") is not None\n ):\n return section_head.find(\".//identifier\").text\n\n # if we can't find it in otherIdentifierOfIdentity,\n # try sources\n for source in element.findall(\".//sources\"):\n code_of_source = source.find(\".//codeOfSource\")\n if code_of_source is not None and code_of_source.text.lower() == code.lower():\n return source.find(\".//sourceIdentifier\").text\n\n return \"\"\n\n\ndef get_external_information_uri(element, match_string):\n \"\"\"Get URLs associated with an author from their ISNI record\"\"\"\n\n sources = element.findall(\".//externalInformation\")\n for source in sources:\n information = source.find(\".//information\")\n uri = source.find(\".//URI\")\n if (\n uri is not None\n and information is not None\n and information.text.lower() == match_string.lower()\n ):\n return uri.text\n return \"\"\n\n\ndef find_authors_by_name(name_string, description=False):\n \"\"\"Query the ISNI database for possible author matches by name\"\"\"\n\n payload = request_isni_data(\"pica.na\", name_string)\n # parse xml\n root = ET.fromstring(payload)\n # build list of possible authors\n possible_authors = []\n for element in root.iter(\"responseRecord\"):\n personal_name = element.find(\".//forename/..\")\n if not personal_name:\n continue\n\n author = get_author_from_isni(element.find(\".//isniUnformatted\").text)\n\n if bool(description):\n\n titles = []\n # prefer title records from LoC+ coop, Australia, Ireland, or Singapore\n # in that order\n for source in [\"LCNACO\", \"NLA\", \"N6I\", \"NLB\"]:\n for parent in element.findall(f'.//titleOfWork/[@source=\"{source}\"]'):\n titles.append(parent.find(\".//title\"))\n for parent in element.findall(f'.//titleOfWork[@subsource=\"{source}\"]'):\n 
titles.append(parent.find(\".//title\"))\n # otherwise just grab the first title listing\n titles.append(element.find(\".//title\"))\n\n if titles is not None:\n # some of the \"titles\" in ISNI are a little ...iffy\n # '@' is used by ISNI/OCLC to index the starting point ignoring stop words\n # (e.g. \"The @Government of no one\")\n title_elements = [\n e for e in titles if not e.text.replace(\"@\", \"\").isnumeric()\n ]\n if len(title_elements):\n author.bio = title_elements[0].text.replace(\"@\", \"\")\n else:\n author.bio = None\n\n possible_authors.append(author)\n\n return possible_authors\n\n\ndef get_author_from_isni(isni):\n \"\"\"Find data to populate a new author record from their ISNI\"\"\"\n\n payload = request_isni_data(\"pica.isn\", isni)\n # parse xml\n root = ET.fromstring(payload)\n # there should only be a single responseRecord\n # but let's use the first one just in case\n element = root.find(\".//responseRecord\")\n name = make_name_string(element.find(\".//forename/..\"))\n viaf = get_other_identifier(element, \"viaf\")\n # use a set to dedupe aliases in ISNI\n aliases = set()\n aliases_element = element.findall(\".//personalNameVariant\")\n for entry in aliases_element:\n aliases.add(make_name_string(entry))\n # aliases needs to be list not set\n aliases = list(aliases)\n bio = element.find(\".//nameTitle\")\n bio = bio.text if bio is not None else \"\"\n wikipedia = get_external_information_uri(element, \"Wikipedia\")\n\n author = activitypub.Author(\n id=element.find(\".//isniURI\").text,\n name=name,\n isni=isni,\n viafId=viaf,\n aliases=aliases,\n bio=bio,\n wikipediaLink=wikipedia,\n )\n\n return author\n\n\ndef build_author_from_isni(match_value):\n \"\"\"Build basic author class object from ISNI URL\"\"\"\n\n # if it is an isni value get the data\n if match_value.startswith(\"https://isni.org/isni/\"):\n isni = match_value.replace(\"https://isni.org/isni/\", \"\")\n return {\"author\": get_author_from_isni(isni)}\n # otherwise it's a name string\n return {}\n\n\ndef augment_author_metadata(author, isni):\n \"\"\"Update any missing author fields from ISNI data\"\"\"\n\n isni_author = get_author_from_isni(isni)\n isni_author.to_model(model=models.Author, instance=author, overwrite=False)\n\n # we DO want to overwrite aliases because we're adding them to the\n # existing aliases and ISNI will usually have more.\n # We need to dedupe because ISNI records often have lots of dupe aliases\n aliases = set(isni_author.aliases)\n for alias in author.aliases:\n aliases.add(alias)\n author.aliases = list(aliases)\n author.save()\n", "path": "bookwyrm/utils/isni.py"}]} | 2,520 | 164 |
gh_patches_debug_11968 | rasdani/github-patches | git_diff | getredash__redash-1484 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong sort for Date column
It happens when sorting by a date column in the UI.
The screenshot below shows everything.
<img width="544" alt="2016-12-06 14 37 07" src="https://cloud.githubusercontent.com/assets/7091907/20924299/dea511b4-bbc1-11e6-8ffd-2bdda4bcbbd8.png">
--- END ISSUE ---
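For context, the symptom described above looks like a lexicographic (string) sort being applied to date values. A minimal, self-contained sketch of the difference; the rows and the date format are assumptions for illustration, not taken from the report:

```python
from datetime import datetime

# Hypothetical result rows; values and format are illustrative only.
rows = [{"date": "06.12.2016"}, {"date": "11.02.2016"}, {"date": "30.11.2016"}]

# Comparing the raw strings orders by the day field first:
# 06.12.2016 < 11.02.2016 < 30.11.2016
as_strings = sorted(rows, key=lambda r: r["date"])

# Parsing to datetime objects gives the chronological order:
# 11.02.2016 (Feb 11) < 30.11.2016 (Nov 30) < 06.12.2016 (Dec 6)
as_dates = sorted(rows, key=lambda r: datetime.strptime(r["date"], "%d.%m.%Y"))

print(as_strings)
print(as_dates)
```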
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/handlers/queries.py`
Content:
```
1 from itertools import chain
2
3 import sqlparse
4 from flask import jsonify, request
5 from flask_login import login_required
6 from flask_restful import abort
7 from funcy import distinct, take
8 from sqlalchemy.orm.exc import StaleDataError
9
10 from redash import models
11 from redash.handlers.base import (BaseResource, get_object_or_404,
12 org_scoped_rule, paginate, routes)
13 from redash.handlers.query_results import run_query
14 from redash.permissions import (can_modify, not_view_only, require_access,
15 require_admin_or_owner,
16 require_object_modify_permission,
17 require_permission, view_only)
18 from redash.utils import collect_parameters_from_request
19
20
21 @routes.route(org_scoped_rule('/api/queries/format'), methods=['POST'])
22 @login_required
23 def format_sql_query(org_slug=None):
24 arguments = request.get_json(force=True)
25 query = arguments.get("query", "")
26
27 return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})
28
29
30 class QuerySearchResource(BaseResource):
31 @require_permission('view_query')
32 def get(self):
33 term = request.args.get('q', '')
34
35 return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids)]
36
37
38 class QueryRecentResource(BaseResource):
39 @require_permission('view_query')
40 def get(self):
41 queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)
42 recent = [d.to_dict(with_last_modified_by=False) for d in queries]
43
44 global_recent = []
45 if len(recent) < 10:
46 global_recent = [d.to_dict(with_last_modified_by=False) for d in models.Query.recent(self.current_user.group_ids)]
47
48 return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))
49
50
51 class QueryListResource(BaseResource):
52 @require_permission('create_query')
53 def post(self):
54 query_def = request.get_json(force=True)
55 data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)
56 require_access(data_source.groups, self.current_user, not_view_only)
57
58 for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
59 query_def.pop(field, None)
60
61 # If we already executed this query, save the query result reference
62 if 'latest_query_data_id' in query_def:
63 query_def['latest_query_data'] = query_def.pop('latest_query_data_id')
64
65 query_def['query_text'] = query_def.pop('query')
66 query_def['user'] = self.current_user
67 query_def['data_source'] = data_source
68 query_def['org'] = self.current_org
69 query_def['is_draft'] = True
70 query = models.Query.create(**query_def)
71 models.db.session.add(query)
72 models.db.session.commit()
73
74 self.record_event({
75 'action': 'create',
76 'object_id': query.id,
77 'object_type': 'query'
78 })
79
80 return query.to_dict()
81
82 @require_permission('view_query')
83 def get(self):
84 results = models.Query.all_queries(self.current_user.group_ids)
85 page = request.args.get('page', 1, type=int)
86 page_size = request.args.get('page_size', 25, type=int)
87 return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))
88
89
90 class MyQueriesResource(BaseResource):
91 @require_permission('view_query')
92 def get(self):
93 drafts = request.args.get('drafts') is not None
94 results = models.Query.by_user(self.current_user, drafts)
95 page = request.args.get('page', 1, type=int)
96 page_size = request.args.get('page_size', 25, type=int)
97 return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))
98
99
100 class QueryResource(BaseResource):
101 @require_permission('edit_query')
102 def post(self, query_id):
103 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
104 query_def = request.get_json(force=True)
105
106 require_object_modify_permission(query, self.current_user)
107
108 for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:
109 query_def.pop(field, None)
110
111 if 'query' in query_def:
112 query_def['query_text'] = query_def.pop('query')
113
114 query_def['last_modified_by'] = self.current_user
115 query_def['changed_by'] = self.current_user
116 # SQLAlchemy handles the case where a concurrent transaction beats us
117 # to the update. But we still have to make sure that we're not starting
118 # out behind.
119 if 'version' in query_def and query_def['version'] != query.version:
120 abort(409)
121
122 try:
123 self.update_model(query, query_def)
124 models.db.session.commit()
125 except StaleDataError:
126 abort(409)
127
128 result = query.to_dict(with_visualizations=True)
129 return result
130
131 @require_permission('view_query')
132 def get(self, query_id):
133 q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
134 require_access(q.groups, self.current_user, view_only)
135
136 result = q.to_dict(with_visualizations=True)
137 result['can_edit'] = can_modify(q, self.current_user)
138 return result
139
140 # TODO: move to resource of its own? (POST /queries/{id}/archive)
141 def delete(self, query_id):
142 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
143 require_admin_or_owner(query.user_id)
144 query.archive(self.current_user)
145
146
147 class QueryForkResource(BaseResource):
148 @require_permission('edit_query')
149 def post(self, query_id):
150 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
151 forked_query = query.fork(self.current_user)
152 return forked_query.to_dict(with_visualizations=True)
153
154
155 class QueryRefreshResource(BaseResource):
156 def post(self, query_id):
157 query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)
158 require_access(query.groups, self.current_user, not_view_only)
159
160 parameter_values = collect_parameters_from_request(request.args)
161
162 return run_query(query.data_source, parameter_values, query.query_text, query.id)
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py
--- a/redash/handlers/queries.py
+++ b/redash/handlers/queries.py
@@ -58,10 +58,6 @@
for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:
query_def.pop(field, None)
- # If we already executed this query, save the query result reference
- if 'latest_query_data_id' in query_def:
- query_def['latest_query_data'] = query_def.pop('latest_query_data_id')
-
query_def['query_text'] = query_def.pop('query')
query_def['user'] = self.current_user
query_def['data_source'] = data_source
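For general context on the pattern touched by this hunk: in SQLAlchemy, a relationship attribute is assigned a mapped instance, while the corresponding foreign-key column takes the id directly. A standalone sketch with hypothetical models (SQLAlchemy 1.4+ API, not redash's actual schema):

```python
from sqlalchemy import Column, ForeignKey, Integer, create_engine
from sqlalchemy.orm import Session, declarative_base, relationship

Base = declarative_base()


class QueryResult(Base):
    __tablename__ = "query_results"
    id = Column(Integer, primary_key=True)


class Query(Base):
    __tablename__ = "queries"
    id = Column(Integer, primary_key=True)
    latest_query_data_id = Column(Integer, ForeignKey("query_results.id"))
    latest_query_data = relationship(QueryResult)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    result = QueryResult(id=1)
    session.add(result)
    session.flush()

    # The foreign-key column is set with the id itself ...
    by_id = Query(latest_query_data_id=result.id)
    # ... while the relationship attribute expects the mapped instance.
    by_instance = Query(latest_query_data=result)
    session.add_all([by_id, by_instance])
    session.commit()
```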
| {"golden_diff": "diff --git a/redash/handlers/queries.py b/redash/handlers/queries.py\n--- a/redash/handlers/queries.py\n+++ b/redash/handlers/queries.py\n@@ -58,10 +58,6 @@\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:\n query_def.pop(field, None)\n \n- # If we already executed this query, save the query result reference\n- if 'latest_query_data_id' in query_def:\n- query_def['latest_query_data'] = query_def.pop('latest_query_data_id')\n-\n query_def['query_text'] = query_def.pop('query')\n query_def['user'] = self.current_user\n query_def['data_source'] = data_source\n", "issue": "Wrong sort for Date column\nIt happens if sort by date column with UI.\r\n\r\nScreenshot shows everything. \r\n\r\n<img width=\"544\" alt=\"2016-12-06 14 37 07\" src=\"https://cloud.githubusercontent.com/assets/7091907/20924299/dea511b4-bbc1-11e6-8ffd-2bdda4bcbbd8.png\">\r\n\n", "before_files": [{"content": "from itertools import chain\n\nimport sqlparse\nfrom flask import jsonify, request\nfrom flask_login import login_required\nfrom flask_restful import abort\nfrom funcy import distinct, take\nfrom sqlalchemy.orm.exc import StaleDataError\n\nfrom redash import models\nfrom redash.handlers.base import (BaseResource, get_object_or_404,\n org_scoped_rule, paginate, routes)\nfrom redash.handlers.query_results import run_query\nfrom redash.permissions import (can_modify, not_view_only, require_access,\n require_admin_or_owner,\n require_object_modify_permission,\n require_permission, view_only)\nfrom redash.utils import collect_parameters_from_request\n\n\[email protected](org_scoped_rule('/api/queries/format'), methods=['POST'])\n@login_required\ndef format_sql_query(org_slug=None):\n arguments = request.get_json(force=True)\n query = arguments.get(\"query\", \"\")\n\n return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})\n\n\nclass QuerySearchResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n term = request.args.get('q', '')\n\n return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids)]\n\n\nclass QueryRecentResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)\n recent = [d.to_dict(with_last_modified_by=False) for d in queries]\n\n global_recent = []\n if len(recent) < 10:\n global_recent = [d.to_dict(with_last_modified_by=False) for d in models.Query.recent(self.current_user.group_ids)]\n\n return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))\n\n\nclass QueryListResource(BaseResource):\n @require_permission('create_query')\n def post(self):\n query_def = request.get_json(force=True)\n data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)\n require_access(data_source.groups, self.current_user, not_view_only)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:\n query_def.pop(field, None)\n\n # If we already executed this query, save the query result reference\n if 'latest_query_data_id' in query_def:\n query_def['latest_query_data'] = query_def.pop('latest_query_data_id')\n\n query_def['query_text'] = query_def.pop('query')\n query_def['user'] = self.current_user\n query_def['data_source'] = data_source\n query_def['org'] = self.current_org\n query_def['is_draft'] = True\n query = 
models.Query.create(**query_def)\n models.db.session.add(query)\n models.db.session.commit()\n\n self.record_event({\n 'action': 'create',\n 'object_id': query.id,\n 'object_type': 'query'\n })\n\n return query.to_dict()\n\n @require_permission('view_query')\n def get(self):\n results = models.Query.all_queries(self.current_user.group_ids)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass MyQueriesResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n drafts = request.args.get('drafts') is not None\n results = models.Query.by_user(self.current_user, drafts)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass QueryResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n query_def = request.get_json(force=True)\n\n require_object_modify_permission(query, self.current_user)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:\n query_def.pop(field, None)\n\n if 'query' in query_def:\n query_def['query_text'] = query_def.pop('query')\n\n query_def['last_modified_by'] = self.current_user\n query_def['changed_by'] = self.current_user\n # SQLAlchemy handles the case where a concurrent transaction beats us\n # to the update. But we still have to make sure that we're not starting\n # out behind.\n if 'version' in query_def and query_def['version'] != query.version:\n abort(409)\n\n try:\n self.update_model(query, query_def)\n models.db.session.commit()\n except StaleDataError:\n abort(409)\n\n result = query.to_dict(with_visualizations=True)\n return result\n\n @require_permission('view_query')\n def get(self, query_id):\n q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(q.groups, self.current_user, view_only)\n\n result = q.to_dict(with_visualizations=True)\n result['can_edit'] = can_modify(q, self.current_user)\n return result\n\n # TODO: move to resource of its own? 
(POST /queries/{id}/archive)\n def delete(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_admin_or_owner(query.user_id)\n query.archive(self.current_user)\n\n\nclass QueryForkResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n forked_query = query.fork(self.current_user)\n return forked_query.to_dict(with_visualizations=True)\n\n\nclass QueryRefreshResource(BaseResource):\n def post(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(query.groups, self.current_user, not_view_only)\n\n parameter_values = collect_parameters_from_request(request.args)\n\n return run_query(query.data_source, parameter_values, query.query_text, query.id)\n", "path": "redash/handlers/queries.py"}], "after_files": [{"content": "from itertools import chain\n\nimport sqlparse\nfrom flask import jsonify, request\nfrom flask_login import login_required\nfrom flask_restful import abort\nfrom funcy import distinct, take\nfrom sqlalchemy.orm.exc import StaleDataError\n\nfrom redash import models\nfrom redash.handlers.base import (BaseResource, get_object_or_404,\n org_scoped_rule, paginate, routes)\nfrom redash.handlers.query_results import run_query\nfrom redash.permissions import (can_modify, not_view_only, require_access,\n require_admin_or_owner,\n require_object_modify_permission,\n require_permission, view_only)\nfrom redash.utils import collect_parameters_from_request\n\n\[email protected](org_scoped_rule('/api/queries/format'), methods=['POST'])\n@login_required\ndef format_sql_query(org_slug=None):\n arguments = request.get_json(force=True)\n query = arguments.get(\"query\", \"\")\n\n return jsonify({'query': sqlparse.format(query, reindent=True, keyword_case='upper')})\n\n\nclass QuerySearchResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n term = request.args.get('q', '')\n\n return [q.to_dict(with_last_modified_by=False) for q in models.Query.search(term, self.current_user.group_ids)]\n\n\nclass QueryRecentResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n queries = models.Query.recent(self.current_user.group_ids, self.current_user.id)\n recent = [d.to_dict(with_last_modified_by=False) for d in queries]\n\n global_recent = []\n if len(recent) < 10:\n global_recent = [d.to_dict(with_last_modified_by=False) for d in models.Query.recent(self.current_user.group_ids)]\n\n return take(20, distinct(chain(recent, global_recent), key=lambda d: d['id']))\n\n\nclass QueryListResource(BaseResource):\n @require_permission('create_query')\n def post(self):\n query_def = request.get_json(force=True)\n data_source = models.DataSource.get_by_id_and_org(query_def.pop('data_source_id'), self.current_org)\n require_access(data_source.groups, self.current_user, not_view_only)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'last_modified_by']:\n query_def.pop(field, None)\n\n query_def['query_text'] = query_def.pop('query')\n query_def['user'] = self.current_user\n query_def['data_source'] = data_source\n query_def['org'] = self.current_org\n query_def['is_draft'] = True\n query = models.Query.create(**query_def)\n models.db.session.add(query)\n models.db.session.commit()\n\n self.record_event({\n 'action': 'create',\n 'object_id': query.id,\n 'object_type': 'query'\n })\n\n 
return query.to_dict()\n\n @require_permission('view_query')\n def get(self):\n results = models.Query.all_queries(self.current_user.group_ids)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass MyQueriesResource(BaseResource):\n @require_permission('view_query')\n def get(self):\n drafts = request.args.get('drafts') is not None\n results = models.Query.by_user(self.current_user, drafts)\n page = request.args.get('page', 1, type=int)\n page_size = request.args.get('page_size', 25, type=int)\n return paginate(results, page, page_size, lambda q: q.to_dict(with_stats=True, with_last_modified_by=False))\n\n\nclass QueryResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n query_def = request.get_json(force=True)\n\n require_object_modify_permission(query, self.current_user)\n\n for field in ['id', 'created_at', 'api_key', 'visualizations', 'latest_query_data', 'user', 'last_modified_by', 'org']:\n query_def.pop(field, None)\n\n if 'query' in query_def:\n query_def['query_text'] = query_def.pop('query')\n\n query_def['last_modified_by'] = self.current_user\n query_def['changed_by'] = self.current_user\n # SQLAlchemy handles the case where a concurrent transaction beats us\n # to the update. But we still have to make sure that we're not starting\n # out behind.\n if 'version' in query_def and query_def['version'] != query.version:\n abort(409)\n\n try:\n self.update_model(query, query_def)\n models.db.session.commit()\n except StaleDataError:\n abort(409)\n\n result = query.to_dict(with_visualizations=True)\n return result\n\n @require_permission('view_query')\n def get(self, query_id):\n q = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(q.groups, self.current_user, view_only)\n\n result = q.to_dict(with_visualizations=True)\n result['can_edit'] = can_modify(q, self.current_user)\n return result\n\n # TODO: move to resource of its own? (POST /queries/{id}/archive)\n def delete(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_admin_or_owner(query.user_id)\n query.archive(self.current_user)\n\n\nclass QueryForkResource(BaseResource):\n @require_permission('edit_query')\n def post(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n forked_query = query.fork(self.current_user)\n return forked_query.to_dict(with_visualizations=True)\n\n\nclass QueryRefreshResource(BaseResource):\n def post(self, query_id):\n query = get_object_or_404(models.Query.get_by_id_and_org, query_id, self.current_org)\n require_access(query.groups, self.current_user, not_view_only)\n\n parameter_values = collect_parameters_from_request(request.args)\n\n return run_query(query.data_source, parameter_values, query.query_text, query.id)\n", "path": "redash/handlers/queries.py"}]} | 2,205 | 180 |
gh_patches_debug_27380 | rasdani/github-patches | git_diff | deis__deis-2991 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
deis blocks domain creation under improper circumstances
`$ deis domains:add django.paas-sandbox`
`Adding django.paas-sandbox to rubber-undertow... 400 BAD REQUEST`
`{u'domain': [u'Hostname does not look like a valid hostname. Only lowercase characters are allowed.']}`
deis will not let me create a domain where the final part of the domain is "paas-sandbox", as displayed above; however, this is not a top-level domain, nor is it intended as one. Our corporation uses an internal domain that is set as the DNS search domain on all corporate computers. For example, if someone types `http://www/` in their browser, the site comes up because their DNS resolver searches `corp.example.com` for non-qualifying names. They are really going to `www.corp.example.com`, but all they type, and all the Host header on the server sees, is "www". deis requires the domain to be added exactly as the user will type it in the browser in order to serve the page. Our DNS setup is almost identical to Active Directory, so anyone using Active Directory can relate, but this is not an MS / Active Directory specific scenario. The same thing can be done using only DHCP, and most ISPs will do this from their DHCP as well.
Anyway, please fix. Thank you.
--- END ISSUE ---
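The resolver behaviour described above can be observed directly; a small sketch (the host names are the placeholders used in the report, and the output depends entirely on the local DNS search configuration):

```python
import socket

# On a network whose resolver is configured with e.g. "search corp.example.com",
# the single-label name "www" is expanded through the DNS search list, so the
# browser (and the Host header the server receives) only ever contains "www".
for name in ("www", "www.corp.example.com"):
    try:
        print(name, "->", socket.gethostbyname_ex(name))
    except socket.gaierror as exc:
        print(name, "->", exc)  # expected outside such a corporate network
```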
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `controller/api/serializers.py`
Content:
```
1 """
2 Classes to serialize the RESTful representation of Deis API models.
3 """
4
5 from __future__ import unicode_literals
6
7 import json
8 import re
9
10 from django.conf import settings
11 from django.contrib.auth.models import User
12 from django.utils import timezone
13 from rest_framework import serializers
14 from rest_framework.validators import UniqueTogetherValidator
15
16 from api import models
17
18
19 PROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')
20 MEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+[BbKkMmGg])$')
21 CPUSHARE_MATCH = re.compile(r'^(?P<cpu>[0-9]+)$')
22 TAGKEY_MATCH = re.compile(r'^[a-z]+$')
23 TAGVAL_MATCH = re.compile(r'^\w+$')
24
25
26 class JSONFieldSerializer(serializers.Field):
27 def to_representation(self, obj):
28 return obj
29
30 def to_internal_value(self, data):
31 try:
32 val = json.loads(data)
33 except TypeError:
34 val = data
35 return val
36
37
38 class ModelSerializer(serializers.ModelSerializer):
39
40 uuid = serializers.ReadOnlyField()
41
42 def get_validators(self):
43 """
44 Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.
45
46 See https://github.com/deis/deis/pull/2898#discussion_r23105147
47 """
48 validators = super(ModelSerializer, self).get_validators()
49 for v in validators:
50 if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:
51 validators.remove(v)
52 return validators
53
54
55 class UserSerializer(serializers.ModelSerializer):
56 class Meta:
57 model = User
58 fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',
59 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',
60 'is_active']
61 read_only_fields = ['is_superuser', 'is_staff', 'groups',
62 'user_permissions', 'last_login', 'date_joined', 'is_active']
63 extra_kwargs = {'password': {'write_only': True}}
64
65 def create(self, validated_data):
66 now = timezone.now()
67 user = User(
68 email=validated_data.get('email'),
69 username=validated_data.get('username'),
70 last_login=now,
71 date_joined=now,
72 is_active=True
73 )
74 if validated_data.get('first_name'):
75 user.first_name = validated_data['first_name']
76 if validated_data.get('last_name'):
77 user.last_name = validated_data['last_name']
78 user.set_password(validated_data['password'])
79 # Make the first signup an admin / superuser
80 if not User.objects.filter(is_superuser=True).exists():
81 user.is_superuser = user.is_staff = True
82 user.save()
83 return user
84
85
86 class AdminUserSerializer(serializers.ModelSerializer):
87 """Serialize admin status for a User model."""
88
89 class Meta:
90 model = User
91 fields = ['username', 'is_superuser']
92 read_only_fields = ['username']
93
94
95 class AppSerializer(ModelSerializer):
96 """Serialize a :class:`~api.models.App` model."""
97
98 owner = serializers.ReadOnlyField(source='owner.username')
99 structure = JSONFieldSerializer(required=False)
100 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
101 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
102
103 class Meta:
104 """Metadata options for a :class:`AppSerializer`."""
105 model = models.App
106 fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']
107 read_only_fields = ['uuid']
108
109
110 class BuildSerializer(ModelSerializer):
111 """Serialize a :class:`~api.models.Build` model."""
112
113 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
114 owner = serializers.ReadOnlyField(source='owner.username')
115 procfile = JSONFieldSerializer(required=False)
116 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
117 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
118
119 class Meta:
120 """Metadata options for a :class:`BuildSerializer`."""
121 model = models.Build
122 fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',
123 'updated', 'uuid']
124 read_only_fields = ['uuid']
125
126
127 class ConfigSerializer(ModelSerializer):
128 """Serialize a :class:`~api.models.Config` model."""
129
130 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
131 owner = serializers.ReadOnlyField(source='owner.username')
132 values = JSONFieldSerializer(required=False)
133 memory = JSONFieldSerializer(required=False)
134 cpu = JSONFieldSerializer(required=False)
135 tags = JSONFieldSerializer(required=False)
136 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
137 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
138
139 class Meta:
140 """Metadata options for a :class:`ConfigSerializer`."""
141 model = models.Config
142
143 def validate_memory(self, value):
144 for k, v in value.items():
145 if v is None: # use NoneType to unset a value
146 continue
147 if not re.match(PROCTYPE_MATCH, k):
148 raise serializers.ValidationError("Process types can only contain [a-z]")
149 if not re.match(MEMLIMIT_MATCH, str(v)):
150 raise serializers.ValidationError(
151 "Limit format: <number><unit>, where unit = B, K, M or G")
152 return value
153
154 def validate_cpu(self, value):
155 for k, v in value.items():
156 if v is None: # use NoneType to unset a value
157 continue
158 if not re.match(PROCTYPE_MATCH, k):
159 raise serializers.ValidationError("Process types can only contain [a-z]")
160 shares = re.match(CPUSHARE_MATCH, str(v))
161 if not shares:
162 raise serializers.ValidationError("CPU shares must be an integer")
163 for v in shares.groupdict().values():
164 try:
165 i = int(v)
166 except ValueError:
167 raise serializers.ValidationError("CPU shares must be an integer")
168 if i > 1024 or i < 0:
169 raise serializers.ValidationError("CPU shares must be between 0 and 1024")
170 return value
171
172 def validate_tags(self, value):
173 for k, v in value.items():
174 if v is None: # use NoneType to unset a value
175 continue
176 if not re.match(TAGKEY_MATCH, k):
177 raise serializers.ValidationError("Tag keys can only contain [a-z]")
178 if not re.match(TAGVAL_MATCH, str(v)):
179 raise serializers.ValidationError("Invalid tag value")
180 return value
181
182
183 class ReleaseSerializer(ModelSerializer):
184 """Serialize a :class:`~api.models.Release` model."""
185
186 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
187 owner = serializers.ReadOnlyField(source='owner.username')
188 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
189 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
190
191 class Meta:
192 """Metadata options for a :class:`ReleaseSerializer`."""
193 model = models.Release
194
195
196 class ContainerSerializer(ModelSerializer):
197 """Serialize a :class:`~api.models.Container` model."""
198
199 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
200 owner = serializers.ReadOnlyField(source='owner.username')
201 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
202 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
203 release = serializers.SerializerMethodField()
204
205 class Meta:
206 """Metadata options for a :class:`ContainerSerializer`."""
207 model = models.Container
208
209 def get_release(self, obj):
210 return "v{}".format(obj.release.version)
211
212
213 class KeySerializer(ModelSerializer):
214 """Serialize a :class:`~api.models.Key` model."""
215
216 owner = serializers.ReadOnlyField(source='owner.username')
217 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
218 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
219
220 class Meta:
221 """Metadata options for a KeySerializer."""
222 model = models.Key
223
224
225 class DomainSerializer(ModelSerializer):
226 """Serialize a :class:`~api.models.Domain` model."""
227
228 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
229 owner = serializers.ReadOnlyField(source='owner.username')
230 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
231 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
232
233 class Meta:
234 """Metadata options for a :class:`DomainSerializer`."""
235 model = models.Domain
236 fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']
237
238 def validate_domain(self, value):
239 """
240 Check that the hostname is valid
241 """
242 match = re.match(
243 r'^(\*\.)?(' + settings.APP_URL_REGEX + r'\.)*([a-z0-9-]+)\.([a-z0-9]{2,})$',
244 value)
245 if not match:
246 raise serializers.ValidationError(
247 "Hostname does not look like a valid hostname. "
248 "Only lowercase characters are allowed.")
249
250 if models.Domain.objects.filter(domain=value).exists():
251 raise serializers.ValidationError(
252 "The domain {} is already in use by another app".format(value))
253
254 domain_parts = value.split('.')
255 if domain_parts[0] == '*':
256 raise serializers.ValidationError(
257 "Adding a wildcard subdomain is currently not supported".format(value))
258
259 return value
260
261
262 class PushSerializer(ModelSerializer):
263 """Serialize a :class:`~api.models.Push` model."""
264
265 app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())
266 owner = serializers.ReadOnlyField(source='owner.username')
267 created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
268 updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)
269
270 class Meta:
271 """Metadata options for a :class:`PushSerializer`."""
272 model = models.Push
273 fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',
274 'ssh_connection', 'ssh_original_command', 'created', 'updated']
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/controller/api/serializers.py b/controller/api/serializers.py
--- a/controller/api/serializers.py
+++ b/controller/api/serializers.py
@@ -239,23 +239,22 @@
"""
Check that the hostname is valid
"""
- match = re.match(
- r'^(\*\.)?(' + settings.APP_URL_REGEX + r'\.)*([a-z0-9-]+)\.([a-z0-9]{2,})$',
- value)
- if not match:
+ if len(value) > 255:
+ raise serializers.ValidationError('Hostname must be 255 characters or less.')
+ if value[-1:] == ".":
+ value = value[:-1] # strip exactly one dot from the right, if present
+ labels = value.split('.')
+ if labels[0] == '*':
raise serializers.ValidationError(
- "Hostname does not look like a valid hostname. "
- "Only lowercase characters are allowed.")
-
+ 'Adding a wildcard subdomain is currently not supported.')
+ allowed = re.compile("^(?!-)[a-z0-9-]{1,63}(?<!-)$", re.IGNORECASE)
+ for label in labels:
+ match = allowed.match(label)
+ if not match or '--' in label or label[-1].isdigit() or label.isdigit():
+ raise serializers.ValidationError('Hostname does not look valid.')
if models.Domain.objects.filter(domain=value).exists():
raise serializers.ValidationError(
"The domain {} is already in use by another app".format(value))
-
- domain_parts = value.split('.')
- if domain_parts[0] == '*':
- raise serializers.ValidationError(
- "Adding a wildcard subdomain is currently not supported".format(value))
-
return value
| {"golden_diff": "diff --git a/controller/api/serializers.py b/controller/api/serializers.py\n--- a/controller/api/serializers.py\n+++ b/controller/api/serializers.py\n@@ -239,23 +239,22 @@\n \"\"\"\n Check that the hostname is valid\n \"\"\"\n- match = re.match(\n- r'^(\\*\\.)?(' + settings.APP_URL_REGEX + r'\\.)*([a-z0-9-]+)\\.([a-z0-9]{2,})$',\n- value)\n- if not match:\n+ if len(value) > 255:\n+ raise serializers.ValidationError('Hostname must be 255 characters or less.')\n+ if value[-1:] == \".\":\n+ value = value[:-1] # strip exactly one dot from the right, if present\n+ labels = value.split('.')\n+ if labels[0] == '*':\n raise serializers.ValidationError(\n- \"Hostname does not look like a valid hostname. \"\n- \"Only lowercase characters are allowed.\")\n-\n+ 'Adding a wildcard subdomain is currently not supported.')\n+ allowed = re.compile(\"^(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n+ for label in labels:\n+ match = allowed.match(label)\n+ if not match or '--' in label or label[-1].isdigit() or label.isdigit():\n+ raise serializers.ValidationError('Hostname does not look valid.')\n if models.Domain.objects.filter(domain=value).exists():\n raise serializers.ValidationError(\n \"The domain {} is already in use by another app\".format(value))\n-\n- domain_parts = value.split('.')\n- if domain_parts[0] == '*':\n- raise serializers.ValidationError(\n- \"Adding a wildcard subdomain is currently not supported\".format(value))\n-\n return value\n", "issue": "deis blocks domain creation under improper circumstances\n`$ deis domains:add django.paas-sandbox`\n`Adding django.paas-sandbox to rubber-undertow... 400 BAD REQUEST`\n`{u'domain': [u'Hostname does not look like a valid hostname. Only lowercase characters are allowed.']}`\n\ndeis will not let me create a domain where the top of the domain is \"paas-sandbox\", as displayed above, however this is not a top level domain nor is it intended as one. Our corporation uses an internal domain that is set as the dns search on all corporate computers. For example, if someone types `http://www/` in their browser, it will come up because their dns searches `corp.example.com` for non qualifying domains. They are really going to the site `www.corp.example.com` but all they type and all the host header on the server sees is \"www\". deis requires the domain to exist how the user will display it in the browser in order to render the page. Our DNS system is almost identical to Active Directory so anyone using Active Directory can relate but this is not a MS / Active Directory specific scenario. This can be done using only dhcp and most ISP's will do this from their dhcp as well.\n\nAnyways, please fix. 
Thank you.\n\n", "before_files": [{"content": "\"\"\"\nClasses to serialize the RESTful representation of Deis API models.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom api import models\n\n\nPROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')\nMEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+[BbKkMmGg])$')\nCPUSHARE_MATCH = re.compile(r'^(?P<cpu>[0-9]+)$')\nTAGKEY_MATCH = re.compile(r'^[a-z]+$')\nTAGVAL_MATCH = re.compile(r'^\\w+$')\n\n\nclass JSONFieldSerializer(serializers.Field):\n def to_representation(self, obj):\n return obj\n\n def to_internal_value(self, data):\n try:\n val = json.loads(data)\n except TypeError:\n val = data\n return val\n\n\nclass ModelSerializer(serializers.ModelSerializer):\n\n uuid = serializers.ReadOnlyField()\n\n def get_validators(self):\n \"\"\"\n Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.\n\n See https://github.com/deis/deis/pull/2898#discussion_r23105147\n \"\"\"\n validators = super(ModelSerializer, self).get_validators()\n for v in validators:\n if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:\n validators.remove(v)\n return validators\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',\n 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',\n 'is_active']\n read_only_fields = ['is_superuser', 'is_staff', 'groups',\n 'user_permissions', 'last_login', 'date_joined', 'is_active']\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n now = timezone.now()\n user = User(\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n last_login=now,\n date_joined=now,\n is_active=True\n )\n if validated_data.get('first_name'):\n user.first_name = validated_data['first_name']\n if validated_data.get('last_name'):\n user.last_name = validated_data['last_name']\n user.set_password(validated_data['password'])\n # Make the first signup an admin / superuser\n if not User.objects.filter(is_superuser=True).exists():\n user.is_superuser = user.is_staff = True\n user.save()\n return user\n\n\nclass AdminUserSerializer(serializers.ModelSerializer):\n \"\"\"Serialize admin status for a User model.\"\"\"\n\n class Meta:\n model = User\n fields = ['username', 'is_superuser']\n read_only_fields = ['username']\n\n\nclass AppSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.App` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n structure = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`AppSerializer`.\"\"\"\n model = models.App\n fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']\n read_only_fields = ['uuid']\n\n\nclass BuildSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Build` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n procfile = 
JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`BuildSerializer`.\"\"\"\n model = models.Build\n fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',\n 'updated', 'uuid']\n read_only_fields = ['uuid']\n\n\nclass ConfigSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Config` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n values = JSONFieldSerializer(required=False)\n memory = JSONFieldSerializer(required=False)\n cpu = JSONFieldSerializer(required=False)\n tags = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ConfigSerializer`.\"\"\"\n model = models.Config\n\n def validate_memory(self, value):\n for k, v in value.items():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n if not re.match(MEMLIMIT_MATCH, str(v)):\n raise serializers.ValidationError(\n \"Limit format: <number><unit>, where unit = B, K, M or G\")\n return value\n\n def validate_cpu(self, value):\n for k, v in value.items():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n shares = re.match(CPUSHARE_MATCH, str(v))\n if not shares:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n for v in shares.groupdict().values():\n try:\n i = int(v)\n except ValueError:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n if i > 1024 or i < 0:\n raise serializers.ValidationError(\"CPU shares must be between 0 and 1024\")\n return value\n\n def validate_tags(self, value):\n for k, v in value.items():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(TAGKEY_MATCH, k):\n raise serializers.ValidationError(\"Tag keys can only contain [a-z]\")\n if not re.match(TAGVAL_MATCH, str(v)):\n raise serializers.ValidationError(\"Invalid tag value\")\n return value\n\n\nclass ReleaseSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Release` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ReleaseSerializer`.\"\"\"\n model = models.Release\n\n\nclass ContainerSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Container` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = 
serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n release = serializers.SerializerMethodField()\n\n class Meta:\n \"\"\"Metadata options for a :class:`ContainerSerializer`.\"\"\"\n model = models.Container\n\n def get_release(self, obj):\n return \"v{}\".format(obj.release.version)\n\n\nclass KeySerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Key` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a KeySerializer.\"\"\"\n model = models.Key\n\n\nclass DomainSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Domain` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`DomainSerializer`.\"\"\"\n model = models.Domain\n fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']\n\n def validate_domain(self, value):\n \"\"\"\n Check that the hostname is valid\n \"\"\"\n match = re.match(\n r'^(\\*\\.)?(' + settings.APP_URL_REGEX + r'\\.)*([a-z0-9-]+)\\.([a-z0-9]{2,})$',\n value)\n if not match:\n raise serializers.ValidationError(\n \"Hostname does not look like a valid hostname. \"\n \"Only lowercase characters are allowed.\")\n\n if models.Domain.objects.filter(domain=value).exists():\n raise serializers.ValidationError(\n \"The domain {} is already in use by another app\".format(value))\n\n domain_parts = value.split('.')\n if domain_parts[0] == '*':\n raise serializers.ValidationError(\n \"Adding a wildcard subdomain is currently not supported\".format(value))\n\n return value\n\n\nclass PushSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Push` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`PushSerializer`.\"\"\"\n model = models.Push\n fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',\n 'ssh_connection', 'ssh_original_command', 'created', 'updated']\n", "path": "controller/api/serializers.py"}], "after_files": [{"content": "\"\"\"\nClasses to serialize the RESTful representation of Deis API models.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport json\nimport re\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.utils import timezone\nfrom rest_framework import serializers\nfrom rest_framework.validators import UniqueTogetherValidator\n\nfrom api import models\n\n\nPROCTYPE_MATCH = re.compile(r'^(?P<type>[a-z]+)')\nMEMLIMIT_MATCH = re.compile(r'^(?P<mem>[0-9]+[BbKkMmGg])$')\nCPUSHARE_MATCH = re.compile(r'^(?P<cpu>[0-9]+)$')\nTAGKEY_MATCH = re.compile(r'^[a-z]+$')\nTAGVAL_MATCH = re.compile(r'^\\w+$')\n\n\nclass JSONFieldSerializer(serializers.Field):\n def 
to_representation(self, obj):\n return obj\n\n def to_internal_value(self, data):\n try:\n val = json.loads(data)\n except TypeError:\n val = data\n return val\n\n\nclass ModelSerializer(serializers.ModelSerializer):\n\n uuid = serializers.ReadOnlyField()\n\n def get_validators(self):\n \"\"\"\n Hack to remove DRF's UniqueTogetherValidator when it concerns the UUID.\n\n See https://github.com/deis/deis/pull/2898#discussion_r23105147\n \"\"\"\n validators = super(ModelSerializer, self).get_validators()\n for v in validators:\n if isinstance(v, UniqueTogetherValidator) and 'uuid' in v.fields:\n validators.remove(v)\n return validators\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = ['email', 'username', 'password', 'first_name', 'last_name', 'is_superuser',\n 'is_staff', 'groups', 'user_permissions', 'last_login', 'date_joined',\n 'is_active']\n read_only_fields = ['is_superuser', 'is_staff', 'groups',\n 'user_permissions', 'last_login', 'date_joined', 'is_active']\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n now = timezone.now()\n user = User(\n email=validated_data.get('email'),\n username=validated_data.get('username'),\n last_login=now,\n date_joined=now,\n is_active=True\n )\n if validated_data.get('first_name'):\n user.first_name = validated_data['first_name']\n if validated_data.get('last_name'):\n user.last_name = validated_data['last_name']\n user.set_password(validated_data['password'])\n # Make the first signup an admin / superuser\n if not User.objects.filter(is_superuser=True).exists():\n user.is_superuser = user.is_staff = True\n user.save()\n return user\n\n\nclass AdminUserSerializer(serializers.ModelSerializer):\n \"\"\"Serialize admin status for a User model.\"\"\"\n\n class Meta:\n model = User\n fields = ['username', 'is_superuser']\n read_only_fields = ['username']\n\n\nclass AppSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.App` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n structure = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`AppSerializer`.\"\"\"\n model = models.App\n fields = ['uuid', 'id', 'owner', 'url', 'structure', 'created', 'updated']\n read_only_fields = ['uuid']\n\n\nclass BuildSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Build` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n procfile = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`BuildSerializer`.\"\"\"\n model = models.Build\n fields = ['owner', 'app', 'image', 'sha', 'procfile', 'dockerfile', 'created',\n 'updated', 'uuid']\n read_only_fields = ['uuid']\n\n\nclass ConfigSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Config` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n values = JSONFieldSerializer(required=False)\n memory = 
JSONFieldSerializer(required=False)\n cpu = JSONFieldSerializer(required=False)\n tags = JSONFieldSerializer(required=False)\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ConfigSerializer`.\"\"\"\n model = models.Config\n\n def validate_memory(self, value):\n for k, v in value.items():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n if not re.match(MEMLIMIT_MATCH, str(v)):\n raise serializers.ValidationError(\n \"Limit format: <number><unit>, where unit = B, K, M or G\")\n return value\n\n def validate_cpu(self, value):\n for k, v in value.items():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(PROCTYPE_MATCH, k):\n raise serializers.ValidationError(\"Process types can only contain [a-z]\")\n shares = re.match(CPUSHARE_MATCH, str(v))\n if not shares:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n for v in shares.groupdict().values():\n try:\n i = int(v)\n except ValueError:\n raise serializers.ValidationError(\"CPU shares must be an integer\")\n if i > 1024 or i < 0:\n raise serializers.ValidationError(\"CPU shares must be between 0 and 1024\")\n return value\n\n def validate_tags(self, value):\n for k, v in value.items():\n if v is None: # use NoneType to unset a value\n continue\n if not re.match(TAGKEY_MATCH, k):\n raise serializers.ValidationError(\"Tag keys can only contain [a-z]\")\n if not re.match(TAGVAL_MATCH, str(v)):\n raise serializers.ValidationError(\"Invalid tag value\")\n return value\n\n\nclass ReleaseSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Release` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`ReleaseSerializer`.\"\"\"\n model = models.Release\n\n\nclass ContainerSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Container` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n release = serializers.SerializerMethodField()\n\n class Meta:\n \"\"\"Metadata options for a :class:`ContainerSerializer`.\"\"\"\n model = models.Container\n\n def get_release(self, obj):\n return \"v{}\".format(obj.release.version)\n\n\nclass KeySerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Key` model.\"\"\"\n\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a KeySerializer.\"\"\"\n model = models.Key\n\n\nclass DomainSerializer(ModelSerializer):\n \"\"\"Serialize a 
:class:`~api.models.Domain` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`DomainSerializer`.\"\"\"\n model = models.Domain\n fields = ['uuid', 'owner', 'created', 'updated', 'app', 'domain']\n\n def validate_domain(self, value):\n \"\"\"\n Check that the hostname is valid\n \"\"\"\n if len(value) > 255:\n raise serializers.ValidationError('Hostname must be 255 characters or less.')\n if value[-1:] == \".\":\n value = value[:-1] # strip exactly one dot from the right, if present\n labels = value.split('.')\n if labels[0] == '*':\n raise serializers.ValidationError(\n 'Adding a wildcard subdomain is currently not supported.')\n allowed = re.compile(\"^(?!-)[a-z0-9-]{1,63}(?<!-)$\", re.IGNORECASE)\n for label in labels:\n match = allowed.match(label)\n if not match or '--' in label or label[-1].isdigit() or label.isdigit():\n raise serializers.ValidationError('Hostname does not look valid.')\n if models.Domain.objects.filter(domain=value).exists():\n raise serializers.ValidationError(\n \"The domain {} is already in use by another app\".format(value))\n return value\n\n\nclass PushSerializer(ModelSerializer):\n \"\"\"Serialize a :class:`~api.models.Push` model.\"\"\"\n\n app = serializers.SlugRelatedField(slug_field='id', queryset=models.App.objects.all())\n owner = serializers.ReadOnlyField(source='owner.username')\n created = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n updated = serializers.DateTimeField(format=settings.DEIS_DATETIME_FORMAT, read_only=True)\n\n class Meta:\n \"\"\"Metadata options for a :class:`PushSerializer`.\"\"\"\n model = models.Push\n fields = ['uuid', 'owner', 'app', 'sha', 'fingerprint', 'receive_user', 'receive_repo',\n 'ssh_connection', 'ssh_original_command', 'created', 'updated']\n", "path": "controller/api/serializers.py"}]} | 3,495 | 398 |
gh_patches_debug_7074 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-2969 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ddp_backend in 0.9.0rc12 fails if no CUDA_VISIBLE_DEVICES found
## 🐛 Bug
In ddp_backend, training immediately fails if the environment variable CUDA_VISIBLE_DEVICES isn't set. This line should handle the None case gracefully: https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/accelerators/ddp_backend.py#L90
### To Reproduce
Start a run using ddp on CPU. This was discovered using torchelastic to launch
#### Code sample
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
### Expected behavior
This shouldn't crash if the environment variable isn't set. We could default to `num_gpus = 0` in this case.
Replacing the line above with something like this could work:
`num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()
`
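For illustration, a hedged sketch of a slightly more defensive variant of that parse (the helper name and the zero-GPU fallback are assumptions, not code from this repository):

```python
import os

def visible_gpu_count() -> int:
    # Hypothetical helper: parse CUDA_VISIBLE_DEVICES defensively.
    # A missing or empty variable is treated as "no GPUs visible".
    raw = os.environ.get('CUDA_VISIBLE_DEVICES', '')
    devices = [d for d in raw.split(',') if d.strip()]
    return len(devices)
```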
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/accelerators/ddp_backend.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License
14
15 import os
16 import subprocess
17 import sys
18 from os.path import abspath
19 from time import sleep
20 from typing import Optional
21
22 import numpy as np
23 import torch
24
25 from pytorch_lightning import _logger as log
26 from pytorch_lightning.utilities import AMPType
27 from pytorch_lightning.utilities.distributed import rank_zero_only
28
29 try:
30 from hydra.utils import to_absolute_path, get_original_cwd
31 from hydra.core.hydra_config import HydraConfig
32 except ImportError:
33 HYDRA_AVAILABLE = False
34 else:
35 HYDRA_AVAILABLE = True
36
37 try:
38 from apex import amp
39 except ImportError:
40 amp = None
41
42
43 class DDPBackend(object):
44
45 def __init__(self, trainer):
46 self.trainer = trainer
47 self.task_idx = None
48
49 def slurm_setup(self):
50 self.task_idx = int(os.environ['SLURM_LOCALID'])
51
52 def torchelastic_setup(self):
53 self.task_idx = int(os.environ['LOCAL_RANK'])
54
55 def train(self, model):
56 self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)
57
58 def spawn_ddp_children(self, model):
59 port = os.environ['MASTER_PORT']
60
61 master_address = '127.0.0.1' if 'MASTER_ADDR' not in os.environ else os.environ['MASTER_ADDR']
62 os.environ['MASTER_PORT'] = f'{port}'
63 os.environ['MASTER_ADDR'] = f'{master_address}'
64
65 # allow the user to pass the node rank
66 node_rank = '0'
67 if 'NODE_RANK' in os.environ:
68 node_rank = os.environ['NODE_RANK']
69 if 'GROUP_RANK' in os.environ:
70 node_rank = os.environ['GROUP_RANK']
71
72 os.environ['NODE_RANK'] = node_rank
73 os.environ['LOCAL_RANK'] = '0'
74
75 # when user is using hydra find the absolute path
76 path_lib = abspath if not HYDRA_AVAILABLE else to_absolute_path
77
78 # pull out the commands used to run the script and resolve the abs file path
79 command = sys.argv
80 try:
81 full_path = path_lib(command[0])
82 except Exception as e:
83 full_path = abspath(command[0])
84
85 command[0] = full_path
86 # use the same python interpreter and actually running
87 command = [sys.executable] + command
88
89 # since this script sets the visible devices we replace the gpus flag with a number
90 num_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',').__len__()
91
92 if '--gpus' in command:
93 gpu_flag_idx = command.index('--gpus')
94 command[gpu_flag_idx + 1] = f'{num_gpus}'
95
96 os.environ['WORLD_SIZE'] = f'{num_gpus * self.trainer.num_nodes}'
97
98 self.trainer.interactive_ddp_procs = []
99 for local_rank in range(1, self.trainer.num_processes):
100 env_copy = os.environ.copy()
101 env_copy['LOCAL_RANK'] = f'{local_rank}'
102
103 # start process
104 # if hydra is available and initialized, make sure to set the cwd correctly
105 cwd: Optional[str] = None
106 if HYDRA_AVAILABLE:
107 if HydraConfig.initialized():
108 cwd = get_original_cwd()
109 proc = subprocess.Popen(command, env=env_copy, cwd=cwd)
110 self.trainer.interactive_ddp_procs.append(proc)
111
112 # starting all processes at once can cause issues
113 # with dataloaders delay between 1-10 seconds
114 delay = np.random.uniform(1, 5, 1)[0]
115 sleep(delay)
116
117 local_rank = 0
118 results = self.ddp_train(local_rank, mp_queue=None, model=model, is_master=True)
119 del os.environ['WORLD_SIZE']
120
121 return results
122
123 def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):
124 """
125 Entry point for ddp
126
127 Args:
128 process_idx:
129 mp_queue: multiprocessing queue
130 model:
131 is_master:
132 proc_offset:
133
134 Returns:
135
136 """
137 # offset the process id if requested
138 process_idx = process_idx + proc_offset
139
140 # show progressbar only on progress_rank 0
141 if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:
142 self.trainer.progress_bar_callback.disable()
143
144 # determine which process we are and world size
145 self.trainer.local_rank = process_idx
146 self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx
147 self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes
148
149 # set warning rank
150 rank_zero_only.rank = self.trainer.global_rank
151
152 # set up server using proc 0's ip address
153 # try to init for 20 times at max in case ports are taken
154 # where to store ip_table
155 model.trainer = self.trainer
156 model.init_ddp_connection(
157 self.trainer.global_rank,
158 self.trainer.world_size,
159 self.trainer.is_slurm_managing_tasks
160 )
161
162 # call setup after the ddp process has connected
163 self.trainer.call_setup_hook(model)
164
165 # on world_size=0 let everyone know training is starting
166 if self.trainer.is_global_zero:
167 log.info('-' * 100)
168 log.info(f'distributed_backend={self.trainer.distributed_backend}')
169 log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')
170 log.info('-' * 100)
171
172 # call sync_bn before .cuda(), configure_apex and configure_ddp
173 if self.trainer.sync_batchnorm:
174 model = model.configure_sync_batchnorm(model)
175
176 # MODEL
177 # copy model to each gpu
178 if self.trainer.on_gpu:
179 gpu_idx = process_idx
180
181 # when using ddp, the master process (proc 0) continues running as the main one
182 # this means that the local rank will always be 0
183 # (even if cuda visible devices has other visible gpus)
184 # this means that the master process needs to pull the 0th visible index as the device number
185 if is_master:
186 available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
187 gpu_idx = int(available_gpus[self.trainer.local_rank])
188
189 self.trainer.root_gpu = gpu_idx
190 torch.cuda.set_device(self.trainer.root_gpu)
191 model.cuda(self.trainer.root_gpu)
192
193 # CHOOSE OPTIMIZER
194 # allow for lr schedulers as well
195 optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)
196 self.trainer.optimizers = optimizers
197 self.trainer.lr_schedulers = lr_schedulers
198 self.trainer.optimizer_frequencies = optimizer_frequencies
199
200 # set model properties before going into wrapper
201 self.trainer.copy_trainer_model_properties(model)
202
203 # AMP - run through amp wrapper before going to distributed DP
204 if self.trainer.amp_backend == AMPType.APEX:
205 model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)
206 self.trainer.optimizers = optimizers
207 self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)
208
209 # DDP2 uses all GPUs on the machine
210 if self.trainer.distributed_backend == 'ddp' or self.trainer.distributed_backend == 'ddp_spawn':
211 device_ids = [self.trainer.root_gpu]
212 else: # includes ddp_cpu
213 device_ids = None
214
215 # allow user to configure ddp
216 model = model.configure_ddp(model, device_ids)
217
218 # continue training routine
219 results = self.trainer.run_pretrain_routine(model)
220
221 # get original model
222 model = self.trainer.get_model()
223
224 # persist info in ddp_spawn
225 self.trainer.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)
226
227 # clean up memory
228 torch.cuda.empty_cache()
229
230 if self.trainer.global_rank == 0 and self.trainer.distributed_backend not in ['ddp_spawn', 'ddp_cpu']:
231 return results
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/accelerators/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py
--- a/pytorch_lightning/accelerators/ddp_backend.py
+++ b/pytorch_lightning/accelerators/ddp_backend.py
@@ -87,7 +87,7 @@
command = [sys.executable] + command
# since this script sets the visible devices we replace the gpus flag with a number
- num_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',').__len__()
+ num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()
if '--gpus' in command:
gpu_flag_idx = command.index('--gpus')
| {"golden_diff": "diff --git a/pytorch_lightning/accelerators/ddp_backend.py b/pytorch_lightning/accelerators/ddp_backend.py\n--- a/pytorch_lightning/accelerators/ddp_backend.py\n+++ b/pytorch_lightning/accelerators/ddp_backend.py\n@@ -87,7 +87,7 @@\n command = [sys.executable] + command\n \n # since this script sets the visible devices we replace the gpus flag with a number\n- num_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',').__len__()\n+ num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()\n \n if '--gpus' in command:\n gpu_flag_idx = command.index('--gpus')\n", "issue": "ddp_backend in 0.9.0rc12 fails if no CUDA_VISIBLE_DEVICES found\n## \ud83d\udc1b Bug\r\nIn ddp_backend, training immediately fails if the environment variable CUDA_VISIBLE_DEVICES isn't set. This line should handle the None case gracefully: https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/accelerators/ddp_backend.py#L90\r\n\r\n### To Reproduce\r\nStart a run using ddp on CPU. This was discovered using torchelastic to launch\r\n\r\n\r\n#### Code sample\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue. \r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\nThis shouldn't crash if the environment variable isn't set. We could default to `num_gpus = 0` in this case. \r\nReplacing the line above with something like this could work:\r\n\r\n`num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()\r\n`\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nimport os\nimport subprocess\nimport sys\nfrom os.path import abspath\nfrom time import sleep\nfrom typing import Optional\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.utilities import AMPType\nfrom pytorch_lightning.utilities.distributed import rank_zero_only\n\ntry:\n from hydra.utils import to_absolute_path, get_original_cwd\n from hydra.core.hydra_config import HydraConfig\nexcept ImportError:\n HYDRA_AVAILABLE = False\nelse:\n HYDRA_AVAILABLE = True\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\n\nclass DDPBackend(object):\n\n def __init__(self, trainer):\n self.trainer = trainer\n self.task_idx = None\n\n def slurm_setup(self):\n self.task_idx = int(os.environ['SLURM_LOCALID'])\n\n def torchelastic_setup(self):\n self.task_idx = int(os.environ['LOCAL_RANK'])\n\n def train(self, model):\n self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)\n\n def spawn_ddp_children(self, model):\n port = os.environ['MASTER_PORT']\n\n master_address = '127.0.0.1' if 'MASTER_ADDR' not in os.environ else os.environ['MASTER_ADDR']\n os.environ['MASTER_PORT'] = f'{port}'\n os.environ['MASTER_ADDR'] = f'{master_address}'\n\n # allow the user to pass the node rank\n node_rank = '0'\n if 'NODE_RANK' in os.environ:\n node_rank = os.environ['NODE_RANK']\n if 
'GROUP_RANK' in os.environ:\n node_rank = os.environ['GROUP_RANK']\n\n os.environ['NODE_RANK'] = node_rank\n os.environ['LOCAL_RANK'] = '0'\n\n # when user is using hydra find the absolute path\n path_lib = abspath if not HYDRA_AVAILABLE else to_absolute_path\n\n # pull out the commands used to run the script and resolve the abs file path\n command = sys.argv\n try:\n full_path = path_lib(command[0])\n except Exception as e:\n full_path = abspath(command[0])\n\n command[0] = full_path\n # use the same python interpreter and actually running\n command = [sys.executable] + command\n\n # since this script sets the visible devices we replace the gpus flag with a number\n num_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',').__len__()\n\n if '--gpus' in command:\n gpu_flag_idx = command.index('--gpus')\n command[gpu_flag_idx + 1] = f'{num_gpus}'\n\n os.environ['WORLD_SIZE'] = f'{num_gpus * self.trainer.num_nodes}'\n\n self.trainer.interactive_ddp_procs = []\n for local_rank in range(1, self.trainer.num_processes):\n env_copy = os.environ.copy()\n env_copy['LOCAL_RANK'] = f'{local_rank}'\n\n # start process\n # if hydra is available and initialized, make sure to set the cwd correctly\n cwd: Optional[str] = None\n if HYDRA_AVAILABLE:\n if HydraConfig.initialized():\n cwd = get_original_cwd()\n proc = subprocess.Popen(command, env=env_copy, cwd=cwd)\n self.trainer.interactive_ddp_procs.append(proc)\n\n # starting all processes at once can cause issues\n # with dataloaders delay between 1-10 seconds\n delay = np.random.uniform(1, 5, 1)[0]\n sleep(delay)\n\n local_rank = 0\n results = self.ddp_train(local_rank, mp_queue=None, model=model, is_master=True)\n del os.environ['WORLD_SIZE']\n\n return results\n\n def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):\n \"\"\"\n Entry point for ddp\n\n Args:\n process_idx:\n mp_queue: multiprocessing queue\n model:\n is_master:\n proc_offset:\n\n Returns:\n\n \"\"\"\n # offset the process id if requested\n process_idx = process_idx + proc_offset\n\n # show progressbar only on progress_rank 0\n if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:\n self.trainer.progress_bar_callback.disable()\n\n # determine which process we are and world size\n self.trainer.local_rank = process_idx\n self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx\n self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes\n\n # set warning rank\n rank_zero_only.rank = self.trainer.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are taken\n # where to store ip_table\n model.trainer = self.trainer\n model.init_ddp_connection(\n self.trainer.global_rank,\n self.trainer.world_size,\n self.trainer.is_slurm_managing_tasks\n )\n\n # call setup after the ddp process has connected\n self.trainer.call_setup_hook(model)\n\n # on world_size=0 let everyone know training is starting\n if self.trainer.is_global_zero:\n log.info('-' * 100)\n log.info(f'distributed_backend={self.trainer.distributed_backend}')\n log.info(f'All DDP processes registered. 
Starting ddp with {self.trainer.world_size} processes')\n log.info('-' * 100)\n\n # call sync_bn before .cuda(), configure_apex and configure_ddp\n if self.trainer.sync_batchnorm:\n model = model.configure_sync_batchnorm(model)\n\n # MODEL\n # copy model to each gpu\n if self.trainer.on_gpu:\n gpu_idx = process_idx\n\n # when using ddp, the master process (proc 0) continues running as the main one\n # this means that the local rank will always be 0\n # (even if cuda visible devices has other visible gpus)\n # this means that the master process needs to pull the 0th visible index as the device number\n if is_master:\n available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')\n gpu_idx = int(available_gpus[self.trainer.local_rank])\n\n self.trainer.root_gpu = gpu_idx\n torch.cuda.set_device(self.trainer.root_gpu)\n model.cuda(self.trainer.root_gpu)\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)\n self.trainer.optimizers = optimizers\n self.trainer.lr_schedulers = lr_schedulers\n self.trainer.optimizer_frequencies = optimizer_frequencies\n\n # set model properties before going into wrapper\n self.trainer.copy_trainer_model_properties(model)\n\n # AMP - run through amp wrapper before going to distributed DP\n if self.trainer.amp_backend == AMPType.APEX:\n model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)\n self.trainer.optimizers = optimizers\n self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)\n\n # DDP2 uses all GPUs on the machine\n if self.trainer.distributed_backend == 'ddp' or self.trainer.distributed_backend == 'ddp_spawn':\n device_ids = [self.trainer.root_gpu]\n else: # includes ddp_cpu\n device_ids = None\n\n # allow user to configure ddp\n model = model.configure_ddp(model, device_ids)\n\n # continue training routine\n results = self.trainer.run_pretrain_routine(model)\n\n # get original model\n model = self.trainer.get_model()\n\n # persist info in ddp_spawn\n self.trainer.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)\n\n # clean up memory\n torch.cuda.empty_cache()\n\n if self.trainer.global_rank == 0 and self.trainer.distributed_backend not in ['ddp_spawn', 'ddp_cpu']:\n return results\n", "path": "pytorch_lightning/accelerators/ddp_backend.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nimport os\nimport subprocess\nimport sys\nfrom os.path import abspath\nfrom time import sleep\nfrom typing import Optional\n\nimport numpy as np\nimport torch\n\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.utilities import AMPType\nfrom pytorch_lightning.utilities.distributed import rank_zero_only\n\ntry:\n from hydra.utils import to_absolute_path, get_original_cwd\n from hydra.core.hydra_config import HydraConfig\nexcept ImportError:\n HYDRA_AVAILABLE = False\nelse:\n 
HYDRA_AVAILABLE = True\n\ntry:\n from apex import amp\nexcept ImportError:\n amp = None\n\n\nclass DDPBackend(object):\n\n def __init__(self, trainer):\n self.trainer = trainer\n self.task_idx = None\n\n def slurm_setup(self):\n self.task_idx = int(os.environ['SLURM_LOCALID'])\n\n def torchelastic_setup(self):\n self.task_idx = int(os.environ['LOCAL_RANK'])\n\n def train(self, model):\n self.ddp_train(process_idx=self.task_idx, mp_queue=None, model=model)\n\n def spawn_ddp_children(self, model):\n port = os.environ['MASTER_PORT']\n\n master_address = '127.0.0.1' if 'MASTER_ADDR' not in os.environ else os.environ['MASTER_ADDR']\n os.environ['MASTER_PORT'] = f'{port}'\n os.environ['MASTER_ADDR'] = f'{master_address}'\n\n # allow the user to pass the node rank\n node_rank = '0'\n if 'NODE_RANK' in os.environ:\n node_rank = os.environ['NODE_RANK']\n if 'GROUP_RANK' in os.environ:\n node_rank = os.environ['GROUP_RANK']\n\n os.environ['NODE_RANK'] = node_rank\n os.environ['LOCAL_RANK'] = '0'\n\n # when user is using hydra find the absolute path\n path_lib = abspath if not HYDRA_AVAILABLE else to_absolute_path\n\n # pull out the commands used to run the script and resolve the abs file path\n command = sys.argv\n try:\n full_path = path_lib(command[0])\n except Exception as e:\n full_path = abspath(command[0])\n\n command[0] = full_path\n # use the same python interpreter and actually running\n command = [sys.executable] + command\n\n # since this script sets the visible devices we replace the gpus flag with a number\n num_gpus = os.environ.get('CUDA_VISIBLE_DEVICES', []).split(',').__len__()\n\n if '--gpus' in command:\n gpu_flag_idx = command.index('--gpus')\n command[gpu_flag_idx + 1] = f'{num_gpus}'\n\n os.environ['WORLD_SIZE'] = f'{num_gpus * self.trainer.num_nodes}'\n\n self.trainer.interactive_ddp_procs = []\n for local_rank in range(1, self.trainer.num_processes):\n env_copy = os.environ.copy()\n env_copy['LOCAL_RANK'] = f'{local_rank}'\n\n # start process\n # if hydra is available and initialized, make sure to set the cwd correctly\n cwd: Optional[str] = None\n if HYDRA_AVAILABLE:\n if HydraConfig.initialized():\n cwd = get_original_cwd()\n proc = subprocess.Popen(command, env=env_copy, cwd=cwd)\n self.trainer.interactive_ddp_procs.append(proc)\n\n # starting all processes at once can cause issues\n # with dataloaders delay between 1-10 seconds\n delay = np.random.uniform(1, 5, 1)[0]\n sleep(delay)\n\n local_rank = 0\n results = self.ddp_train(local_rank, mp_queue=None, model=model, is_master=True)\n del os.environ['WORLD_SIZE']\n\n return results\n\n def ddp_train(self, process_idx, mp_queue, model, is_master=False, proc_offset=0):\n \"\"\"\n Entry point for ddp\n\n Args:\n process_idx:\n mp_queue: multiprocessing queue\n model:\n is_master:\n proc_offset:\n\n Returns:\n\n \"\"\"\n # offset the process id if requested\n process_idx = process_idx + proc_offset\n\n # show progressbar only on progress_rank 0\n if (self.trainer.node_rank != 0 or process_idx != 0) and self.trainer.progress_bar_callback is not None:\n self.trainer.progress_bar_callback.disable()\n\n # determine which process we are and world size\n self.trainer.local_rank = process_idx\n self.trainer.global_rank = self.trainer.node_rank * self.trainer.num_processes + process_idx\n self.trainer.world_size = self.trainer.num_nodes * self.trainer.num_processes\n\n # set warning rank\n rank_zero_only.rank = self.trainer.global_rank\n\n # set up server using proc 0's ip address\n # try to init for 20 times at max in case ports are 
taken\n # where to store ip_table\n model.trainer = self.trainer\n model.init_ddp_connection(\n self.trainer.global_rank,\n self.trainer.world_size,\n self.trainer.is_slurm_managing_tasks\n )\n\n # call setup after the ddp process has connected\n self.trainer.call_setup_hook(model)\n\n # on world_size=0 let everyone know training is starting\n if self.trainer.is_global_zero:\n log.info('-' * 100)\n log.info(f'distributed_backend={self.trainer.distributed_backend}')\n log.info(f'All DDP processes registered. Starting ddp with {self.trainer.world_size} processes')\n log.info('-' * 100)\n\n # call sync_bn before .cuda(), configure_apex and configure_ddp\n if self.trainer.sync_batchnorm:\n model = model.configure_sync_batchnorm(model)\n\n # MODEL\n # copy model to each gpu\n if self.trainer.on_gpu:\n gpu_idx = process_idx\n\n # when using ddp, the master process (proc 0) continues running as the main one\n # this means that the local rank will always be 0\n # (even if cuda visible devices has other visible gpus)\n # this means that the master process needs to pull the 0th visible index as the device number\n if is_master:\n available_gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')\n gpu_idx = int(available_gpus[self.trainer.local_rank])\n\n self.trainer.root_gpu = gpu_idx\n torch.cuda.set_device(self.trainer.root_gpu)\n model.cuda(self.trainer.root_gpu)\n\n # CHOOSE OPTIMIZER\n # allow for lr schedulers as well\n optimizers, lr_schedulers, optimizer_frequencies = self.trainer.init_optimizers(model)\n self.trainer.optimizers = optimizers\n self.trainer.lr_schedulers = lr_schedulers\n self.trainer.optimizer_frequencies = optimizer_frequencies\n\n # set model properties before going into wrapper\n self.trainer.copy_trainer_model_properties(model)\n\n # AMP - run through amp wrapper before going to distributed DP\n if self.trainer.amp_backend == AMPType.APEX:\n model, optimizers = model.configure_apex(amp, model, self.trainer.optimizers, self.trainer.amp_level)\n self.trainer.optimizers = optimizers\n self.trainer.reinit_scheduler_properties(self.trainer.optimizers, self.trainer.lr_schedulers)\n\n # DDP2 uses all GPUs on the machine\n if self.trainer.distributed_backend == 'ddp' or self.trainer.distributed_backend == 'ddp_spawn':\n device_ids = [self.trainer.root_gpu]\n else: # includes ddp_cpu\n device_ids = None\n\n # allow user to configure ddp\n model = model.configure_ddp(model, device_ids)\n\n # continue training routine\n results = self.trainer.run_pretrain_routine(model)\n\n # get original model\n model = self.trainer.get_model()\n\n # persist info in ddp_spawn\n self.trainer.transfer_distrib_spawn_state_on_fit_end(model, mp_queue, results)\n\n # clean up memory\n torch.cuda.empty_cache()\n\n if self.trainer.global_rank == 0 and self.trainer.distributed_backend not in ['ddp_spawn', 'ddp_cpu']:\n return results\n", "path": "pytorch_lightning/accelerators/ddp_backend.py"}]} | 3,028 | 163 |
gh_patches_debug_3563 | rasdani/github-patches | git_diff | kivy__kivy-335 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Regression when upgrading from 1.0.9-stable to 1.0.10-dev?
Hello,
I just upgraded Kivy to the latest version via the "git pull" command. I was running 1.0.9 stable and updated Kivy to 1.0.10-dev.
When rerunning my program (on 1.0.9 everything works just fine), this error / stacktrace pops up: http://dpaste.com/hold/676134/.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kivy/uix/image.py`
Content:
```
1 '''
2 Image
3 =====
4
5 The :class:`Image` widget is used to display an image. ::
6
7 wimg = Image(source='mylogo.png')
8
9 Asynchronous loading
10 --------------------
11
12 To load an image asynchronously (for example from an external webserver), use
13 the :class:`AsyncImage` subclass ::
14
15 aimg = AsyncImage(source='http://mywebsite.com/logo.png')
16
17 Alignement
18 ----------
19
20 By default, the image is centered and fitted inside the widget bounding box.
21 If you don't want that, you can inherit from Image and create your own style.
22
23 For example, if you want your image to take the same size of your widget, you
24 can do ::
25
26 class FullImage(Image):
27 pass
28
29 And in your kivy language file, you can do ::
30
31 <FullImage>:
32 canvas:
33 Color:
34 rgb: (1, 1, 1)
35 Rectangle:
36 texture: self.texture
37 size: self.size
38 pos: self.pos
39
40 '''
41
42 __all__ = ('Image', 'AsyncImage')
43
44 from kivy.uix.widget import Widget
45 from kivy.core.image import Image as CoreImage
46 from kivy.resources import resource_find
47 from kivy.properties import StringProperty, ObjectProperty, ListProperty, \
48 AliasProperty, BooleanProperty, NumericProperty
49 from kivy.loader import Loader
50
51
52 class Image(Widget):
53 '''Image class, see module documentation for more information.
54 '''
55
56 source = StringProperty(None)
57 '''Filename / source of your image.
58
59 :data:`source` a :class:`~kivy.properties.StringProperty`, default to None.
60 '''
61
62 texture = ObjectProperty(None, allownone=True)
63 '''Texture object of the image.
64
65 Depending of the texture creation, the value will be a
66 :class:`~kivy.graphics.texture.Texture` or
67 :class:`~kivy.graphics.texture.TextureRegion` object.
68
69 :data:`texture` is a :class:`~kivy.properties.ObjectProperty`, default to
70 None.
71 '''
72
73 texture_size = ListProperty([0, 0])
74 '''Texture size of the image.
75
76 .. warning::
77
78 The texture size is set after the texture property. So if you listen on
79 the change to :data:`texture`, the property texture_size will be not yet
80 updated. Use self.texture.size instead.
81 '''
82
83 def get_image_ratio(self):
84 if self.texture:
85 return self.texture.width / float(self.texture.height)
86 return 1.
87
88 mipmap = BooleanProperty(False)
89 '''Indicate if you want OpenGL mipmapping to be apply on the texture or not.
90 Read :ref:`mipmap` for more information.
91
92 .. versionadded:: 1.0.7
93
94 :data:`mipmap` is a :class:`~kivy.properties.BooleanProperty`, default to
95 False.
96 '''
97
98 image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))
99 '''Ratio of the image (width / float(height)
100
101 :data:`image_ratio` is a :class:`~kivy.properties.AliasProperty`, and is
102 read-only.
103 '''
104
105 color = ListProperty([1, 1, 1, 1])
106 '''Image color, in the format (r, g, b, a). This attribute can be used for
107 'tint' an image. Be careful, if the source image is not gray/white, the
108 color will not really work as expected.
109
110 .. versionadded:: 1.0.6
111
112 :data:`color` is a :class:`~kivy.properties.ListProperty`, default to [1, 1,
113 1, 1].
114 '''
115
116 allow_stretch = BooleanProperty(False)
117 '''If True, the normalized image size will be maximized to fit in the image
118 box. Otherwise, if the box is too tall, the image will not be streched more
119 than 1:1 pixels
120
121 .. versionadded:: 1.0.7
122
123 :data:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty`,
124 default to False
125 '''
126
127 keep_ratio = BooleanProperty(True)
128 '''If False along with allow_stretch being True, the normalized image
129 size will be maximized to fit in the image box disregarding the aspect
130 ratio of the image.
131 Otherwise, if the box is too tall, the image will not be streched more
132 than 1:1 pixels
133
134 .. versionadded:: 1.0.8
135
136 :data:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty`,
137 default to True
138 '''
139
140 anim_delay = NumericProperty(.25)
141 '''Delay of animation if the image is sequenced (like animated gif).
142 If the anim_delay is set to -1, the animation will be stopped.
143
144 .. versionadded:: 1.0.8
145
146 :data:`anim_delay` is a :class:`~kivy.properties.NumericProperty`, default
147 to .25 (4 FPS)
148 '''
149
150 def get_norm_image_size(self):
151 if not self.texture:
152 return self.size
153 ratio = self.image_ratio
154 w, h = self.size
155 tw, th = self.texture.size
156
157 # ensure that the width is always maximized to the containter width
158 if self.allow_stretch:
159 if not self.keep_ratio:
160 return w, h
161 iw = w
162 else:
163 iw = min(w, tw)
164 # calculate the appropriate height
165 ih = iw / ratio
166 # if the height is too higher, take the height of the container
167 # and calculate appropriate width. no need to test further. :)
168 if ih > h:
169 if self.allow_stretch:
170 ih = h
171 else:
172 ih = min(h, th)
173 iw = ih * ratio
174
175 return iw, ih
176
177
178 norm_image_size = AliasProperty(get_norm_image_size, None, bind=(
179 'texture', 'size', 'image_ratio', 'allow_stretch'))
180 '''Normalized image size withing the widget box.
181
182 This size will be always fitted to the widget size, and preserve the image
183 ratio.
184
185 :data:`norm_image_size` is a :class:`~kivy.properties.AliasProperty`, and is
186 read-only.
187 '''
188
189 def __init__(self, **kwargs):
190 self._coreimage = None
191 super(Image, self).__init__(**kwargs)
192 self.bind(source=self.texture_update,
193 mipmap=self.texture_update)
194 if self.source is not None:
195 self.texture_update()
196
197 def texture_update(self, *largs):
198 if not self.source:
199 self.texture = None
200 else:
201 filename = resource_find(self.source)
202 if filename is None:
203 return
204 mipmap = self.mipmap
205 if self._coreimage is not None:
206 self._coreimage.unbind(on_texture=self._on_tex_change)
207 self._coreimage = ci = CoreImage(filename, mipmap=mipmap,
208 anim_delay=self.anim_delay)
209 ci.bind(on_texture=self._on_tex_change)
210 self.texture = ci.texture
211
212 def on_anim_delay(self, instance, value):
213 if self._coreimage is None:
214 return
215 self._coreimage.anim_delay = value
216 if value < 0:
217 self._coreimage.anim_reset(False)
218
219 def on_texture(self, instance, value):
220 if value is not None:
221 self.texture_size = list(value.size)
222
223 def _on_tex_change(self, *largs):
224 # update texture from core image
225 self.texture = self._coreimage.texture
226
227
228 class AsyncImage(Image):
229 '''Asynchronous Image class, see module documentation for more information.
230 '''
231
232 def __init__(self, **kwargs):
233 self._coreimage = None
234 super(AsyncImage, self).__init__(**kwargs)
235 self.unbind(source=self.texture_update,
236 mipmap=self.texture_update)
237
238 def on_source(self, instance, value):
239 if not value:
240 self._coreimage.unbind(on_texture=self._on_tex_change)
241 self.texture = None
242 self._coreimage = None
243 else:
244 if not self.is_uri(value):
245 value = resource_find(value)
246 self._coreimage = image = Loader.image(value)
247 image.bind(on_load=self.on_source_load)
248 image.bind(on_texture=self._on_tex_change)
249 self.texture = image.texture
250
251 def on_source_load(self, value):
252 image = self._coreimage.image
253 if not image:
254 return
255 self.texture = image.texture
256
257 def is_uri(self, filename):
258 proto = filename.split('://', 1)[0]
259 return proto in ('http', 'https', 'ftp')
260
261 def _on_tex_change(self, *largs):
262 if self._coreimage:
263 self.texture = self._coreimage.texture
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kivy/uix/image.py b/kivy/uix/image.py
--- a/kivy/uix/image.py
+++ b/kivy/uix/image.py
@@ -237,7 +237,8 @@
def on_source(self, instance, value):
if not value:
- self._coreimage.unbind(on_texture=self._on_tex_change)
+ if self._coreimage is not None:
+ self._coreimage.unbind(on_texture=self._on_tex_change)
self.texture = None
self._coreimage = None
else:
| {"golden_diff": "diff --git a/kivy/uix/image.py b/kivy/uix/image.py\n--- a/kivy/uix/image.py\n+++ b/kivy/uix/image.py\n@@ -237,7 +237,8 @@\n \n def on_source(self, instance, value):\n if not value:\n- self._coreimage.unbind(on_texture=self._on_tex_change)\n+ if self._coreimage is not None:\n+ self._coreimage.unbind(on_texture=self._on_tex_change)\n self.texture = None\n self._coreimage = None\n else:\n", "issue": "Regression when upgrading from 1.0.9-stable to 1.0.10-dev?\nHello,\n\nI just upgraded Kivy to the latest version via the \"git pull\" command. I was running 1.0.9 stable and updated Kivy to 1.0.10-dev.\n\nWhen rerunning my program (on 1.0.9 everything works just fine), this error / stacktrace pops up: http://dpaste.com/hold/676134/.\n\n", "before_files": [{"content": "'''\nImage\n=====\n\nThe :class:`Image` widget is used to display an image. ::\n\n wimg = Image(source='mylogo.png')\n\nAsynchronous loading\n--------------------\n\nTo load an image asynchronously (for example from an external webserver), use\nthe :class:`AsyncImage` subclass ::\n\n aimg = AsyncImage(source='http://mywebsite.com/logo.png')\n\nAlignement\n----------\n\nBy default, the image is centered and fitted inside the widget bounding box.\nIf you don't want that, you can inherit from Image and create your own style.\n\nFor example, if you want your image to take the same size of your widget, you\ncan do ::\n\n class FullImage(Image):\n pass\n\nAnd in your kivy language file, you can do ::\n\n <FullImage>:\n canvas:\n Color:\n rgb: (1, 1, 1)\n Rectangle:\n texture: self.texture\n size: self.size\n pos: self.pos\n\n'''\n\n__all__ = ('Image', 'AsyncImage')\n\nfrom kivy.uix.widget import Widget\nfrom kivy.core.image import Image as CoreImage\nfrom kivy.resources import resource_find\nfrom kivy.properties import StringProperty, ObjectProperty, ListProperty, \\\n AliasProperty, BooleanProperty, NumericProperty\nfrom kivy.loader import Loader\n\n\nclass Image(Widget):\n '''Image class, see module documentation for more information.\n '''\n\n source = StringProperty(None)\n '''Filename / source of your image.\n\n :data:`source` a :class:`~kivy.properties.StringProperty`, default to None.\n '''\n\n texture = ObjectProperty(None, allownone=True)\n '''Texture object of the image.\n\n Depending of the texture creation, the value will be a\n :class:`~kivy.graphics.texture.Texture` or\n :class:`~kivy.graphics.texture.TextureRegion` object.\n\n :data:`texture` is a :class:`~kivy.properties.ObjectProperty`, default to\n None.\n '''\n\n texture_size = ListProperty([0, 0])\n '''Texture size of the image.\n\n .. warning::\n\n The texture size is set after the texture property. So if you listen on\n the change to :data:`texture`, the property texture_size will be not yet\n updated. Use self.texture.size instead.\n '''\n\n def get_image_ratio(self):\n if self.texture:\n return self.texture.width / float(self.texture.height)\n return 1.\n\n mipmap = BooleanProperty(False)\n '''Indicate if you want OpenGL mipmapping to be apply on the texture or not.\n Read :ref:`mipmap` for more information.\n\n .. versionadded:: 1.0.7\n\n :data:`mipmap` is a :class:`~kivy.properties.BooleanProperty`, default to\n False.\n '''\n\n image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))\n '''Ratio of the image (width / float(height)\n\n :data:`image_ratio` is a :class:`~kivy.properties.AliasProperty`, and is\n read-only.\n '''\n\n color = ListProperty([1, 1, 1, 1])\n '''Image color, in the format (r, g, b, a). 
This attribute can be used for\n 'tint' an image. Be careful, if the source image is not gray/white, the\n color will not really work as expected.\n\n .. versionadded:: 1.0.6\n\n :data:`color` is a :class:`~kivy.properties.ListProperty`, default to [1, 1,\n 1, 1].\n '''\n\n allow_stretch = BooleanProperty(False)\n '''If True, the normalized image size will be maximized to fit in the image\n box. Otherwise, if the box is too tall, the image will not be streched more\n than 1:1 pixels\n\n .. versionadded:: 1.0.7\n\n :data:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty`,\n default to False\n '''\n\n keep_ratio = BooleanProperty(True)\n '''If False along with allow_stretch being True, the normalized image\n size will be maximized to fit in the image box disregarding the aspect\n ratio of the image.\n Otherwise, if the box is too tall, the image will not be streched more\n than 1:1 pixels\n\n .. versionadded:: 1.0.8\n\n :data:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty`,\n default to True\n '''\n\n anim_delay = NumericProperty(.25)\n '''Delay of animation if the image is sequenced (like animated gif).\n If the anim_delay is set to -1, the animation will be stopped.\n\n .. versionadded:: 1.0.8\n\n :data:`anim_delay` is a :class:`~kivy.properties.NumericProperty`, default\n to .25 (4 FPS)\n '''\n\n def get_norm_image_size(self):\n if not self.texture:\n return self.size\n ratio = self.image_ratio\n w, h = self.size\n tw, th = self.texture.size\n\n # ensure that the width is always maximized to the containter width\n if self.allow_stretch:\n if not self.keep_ratio:\n return w, h\n iw = w\n else:\n iw = min(w, tw)\n # calculate the appropriate height\n ih = iw / ratio\n # if the height is too higher, take the height of the container\n # and calculate appropriate width. no need to test further. 
:)\n if ih > h:\n if self.allow_stretch:\n ih = h\n else:\n ih = min(h, th)\n iw = ih * ratio\n\n return iw, ih\n\n\n norm_image_size = AliasProperty(get_norm_image_size, None, bind=(\n 'texture', 'size', 'image_ratio', 'allow_stretch'))\n '''Normalized image size withing the widget box.\n\n This size will be always fitted to the widget size, and preserve the image\n ratio.\n\n :data:`norm_image_size` is a :class:`~kivy.properties.AliasProperty`, and is\n read-only.\n '''\n\n def __init__(self, **kwargs):\n self._coreimage = None\n super(Image, self).__init__(**kwargs)\n self.bind(source=self.texture_update,\n mipmap=self.texture_update)\n if self.source is not None:\n self.texture_update()\n\n def texture_update(self, *largs):\n if not self.source:\n self.texture = None\n else:\n filename = resource_find(self.source)\n if filename is None:\n return\n mipmap = self.mipmap\n if self._coreimage is not None:\n self._coreimage.unbind(on_texture=self._on_tex_change)\n self._coreimage = ci = CoreImage(filename, mipmap=mipmap,\n anim_delay=self.anim_delay)\n ci.bind(on_texture=self._on_tex_change)\n self.texture = ci.texture\n\n def on_anim_delay(self, instance, value):\n if self._coreimage is None:\n return\n self._coreimage.anim_delay = value\n if value < 0:\n self._coreimage.anim_reset(False)\n\n def on_texture(self, instance, value):\n if value is not None:\n self.texture_size = list(value.size)\n\n def _on_tex_change(self, *largs):\n # update texture from core image\n self.texture = self._coreimage.texture\n\n\nclass AsyncImage(Image):\n '''Asynchronous Image class, see module documentation for more information.\n '''\n\n def __init__(self, **kwargs):\n self._coreimage = None\n super(AsyncImage, self).__init__(**kwargs)\n self.unbind(source=self.texture_update,\n mipmap=self.texture_update)\n\n def on_source(self, instance, value):\n if not value:\n self._coreimage.unbind(on_texture=self._on_tex_change)\n self.texture = None\n self._coreimage = None\n else:\n if not self.is_uri(value):\n value = resource_find(value)\n self._coreimage = image = Loader.image(value)\n image.bind(on_load=self.on_source_load)\n image.bind(on_texture=self._on_tex_change)\n self.texture = image.texture\n\n def on_source_load(self, value):\n image = self._coreimage.image\n if not image:\n return\n self.texture = image.texture\n\n def is_uri(self, filename):\n proto = filename.split('://', 1)[0]\n return proto in ('http', 'https', 'ftp')\n\n def _on_tex_change(self, *largs):\n if self._coreimage:\n self.texture = self._coreimage.texture\n", "path": "kivy/uix/image.py"}], "after_files": [{"content": "'''\nImage\n=====\n\nThe :class:`Image` widget is used to display an image. 
::\n\n wimg = Image(source='mylogo.png')\n\nAsynchronous loading\n--------------------\n\nTo load an image asynchronously (for example from an external webserver), use\nthe :class:`AsyncImage` subclass ::\n\n aimg = AsyncImage(source='http://mywebsite.com/logo.png')\n\nAlignement\n----------\n\nBy default, the image is centered and fitted inside the widget bounding box.\nIf you don't want that, you can inherit from Image and create your own style.\n\nFor example, if you want your image to take the same size of your widget, you\ncan do ::\n\n class FullImage(Image):\n pass\n\nAnd in your kivy language file, you can do ::\n\n <FullImage>:\n canvas:\n Color:\n rgb: (1, 1, 1)\n Rectangle:\n texture: self.texture\n size: self.size\n pos: self.pos\n\n'''\n\n__all__ = ('Image', 'AsyncImage')\n\nfrom kivy.uix.widget import Widget\nfrom kivy.core.image import Image as CoreImage\nfrom kivy.resources import resource_find\nfrom kivy.properties import StringProperty, ObjectProperty, ListProperty, \\\n AliasProperty, BooleanProperty, NumericProperty\nfrom kivy.loader import Loader\n\n\nclass Image(Widget):\n '''Image class, see module documentation for more information.\n '''\n\n source = StringProperty(None)\n '''Filename / source of your image.\n\n :data:`source` a :class:`~kivy.properties.StringProperty`, default to None.\n '''\n\n texture = ObjectProperty(None, allownone=True)\n '''Texture object of the image.\n\n Depending of the texture creation, the value will be a\n :class:`~kivy.graphics.texture.Texture` or\n :class:`~kivy.graphics.texture.TextureRegion` object.\n\n :data:`texture` is a :class:`~kivy.properties.ObjectProperty`, default to\n None.\n '''\n\n texture_size = ListProperty([0, 0])\n '''Texture size of the image.\n\n .. warning::\n\n The texture size is set after the texture property. So if you listen on\n the change to :data:`texture`, the property texture_size will be not yet\n updated. Use self.texture.size instead.\n '''\n\n def get_image_ratio(self):\n if self.texture:\n return self.texture.width / float(self.texture.height)\n return 1.\n\n mipmap = BooleanProperty(False)\n '''Indicate if you want OpenGL mipmapping to be apply on the texture or not.\n Read :ref:`mipmap` for more information.\n\n .. versionadded:: 1.0.7\n\n :data:`mipmap` is a :class:`~kivy.properties.BooleanProperty`, default to\n False.\n '''\n\n image_ratio = AliasProperty(get_image_ratio, None, bind=('texture', ))\n '''Ratio of the image (width / float(height)\n\n :data:`image_ratio` is a :class:`~kivy.properties.AliasProperty`, and is\n read-only.\n '''\n\n color = ListProperty([1, 1, 1, 1])\n '''Image color, in the format (r, g, b, a). This attribute can be used for\n 'tint' an image. Be careful, if the source image is not gray/white, the\n color will not really work as expected.\n\n .. versionadded:: 1.0.6\n\n :data:`color` is a :class:`~kivy.properties.ListProperty`, default to [1, 1,\n 1, 1].\n '''\n\n allow_stretch = BooleanProperty(False)\n '''If True, the normalized image size will be maximized to fit in the image\n box. Otherwise, if the box is too tall, the image will not be streched more\n than 1:1 pixels\n\n .. 
versionadded:: 1.0.7\n\n :data:`allow_stretch` is a :class:`~kivy.properties.BooleanProperty`,\n default to False\n '''\n\n keep_ratio = BooleanProperty(True)\n '''If False along with allow_stretch being True, the normalized image\n size will be maximized to fit in the image box disregarding the aspect\n ratio of the image.\n Otherwise, if the box is too tall, the image will not be streched more\n than 1:1 pixels\n\n .. versionadded:: 1.0.8\n\n :data:`keep_ratio` is a :class:`~kivy.properties.BooleanProperty`,\n default to True\n '''\n\n anim_delay = NumericProperty(.25)\n '''Delay of animation if the image is sequenced (like animated gif).\n If the anim_delay is set to -1, the animation will be stopped.\n\n .. versionadded:: 1.0.8\n\n :data:`anim_delay` is a :class:`~kivy.properties.NumericProperty`, default\n to .25 (4 FPS)\n '''\n\n def get_norm_image_size(self):\n if not self.texture:\n return self.size\n ratio = self.image_ratio\n w, h = self.size\n tw, th = self.texture.size\n\n # ensure that the width is always maximized to the containter width\n if self.allow_stretch:\n if not self.keep_ratio:\n return w, h\n iw = w\n else:\n iw = min(w, tw)\n # calculate the appropriate height\n ih = iw / ratio\n # if the height is too higher, take the height of the container\n # and calculate appropriate width. no need to test further. :)\n if ih > h:\n if self.allow_stretch:\n ih = h\n else:\n ih = min(h, th)\n iw = ih * ratio\n\n return iw, ih\n\n\n norm_image_size = AliasProperty(get_norm_image_size, None, bind=(\n 'texture', 'size', 'image_ratio', 'allow_stretch'))\n '''Normalized image size withing the widget box.\n\n This size will be always fitted to the widget size, and preserve the image\n ratio.\n\n :data:`norm_image_size` is a :class:`~kivy.properties.AliasProperty`, and is\n read-only.\n '''\n\n def __init__(self, **kwargs):\n self._coreimage = None\n super(Image, self).__init__(**kwargs)\n self.bind(source=self.texture_update,\n mipmap=self.texture_update)\n if self.source is not None:\n self.texture_update()\n\n def texture_update(self, *largs):\n if not self.source:\n self.texture = None\n else:\n filename = resource_find(self.source)\n if filename is None:\n return\n mipmap = self.mipmap\n if self._coreimage is not None:\n self._coreimage.unbind(on_texture=self._on_tex_change)\n self._coreimage = ci = CoreImage(filename, mipmap=mipmap,\n anim_delay=self.anim_delay)\n ci.bind(on_texture=self._on_tex_change)\n self.texture = ci.texture\n\n def on_anim_delay(self, instance, value):\n if self._coreimage is None:\n return\n self._coreimage.anim_delay = value\n if value < 0:\n self._coreimage.anim_reset(False)\n\n def on_texture(self, instance, value):\n if value is not None:\n self.texture_size = list(value.size)\n\n def _on_tex_change(self, *largs):\n # update texture from core image\n self.texture = self._coreimage.texture\n\n\nclass AsyncImage(Image):\n '''Asynchronous Image class, see module documentation for more information.\n '''\n\n def __init__(self, **kwargs):\n self._coreimage = None\n super(AsyncImage, self).__init__(**kwargs)\n self.unbind(source=self.texture_update,\n mipmap=self.texture_update)\n\n def on_source(self, instance, value):\n if not value:\n if self._coreimage is not None:\n self._coreimage.unbind(on_texture=self._on_tex_change)\n self.texture = None\n self._coreimage = None\n else:\n if not self.is_uri(value):\n value = resource_find(value)\n self._coreimage = image = Loader.image(value)\n image.bind(on_load=self.on_source_load)\n 
image.bind(on_texture=self._on_tex_change)\n self.texture = image.texture\n\n def on_source_load(self, value):\n image = self._coreimage.image\n if not image:\n return\n self.texture = image.texture\n\n def is_uri(self, filename):\n proto = filename.split('://', 1)[0]\n return proto in ('http', 'https', 'ftp')\n\n def _on_tex_change(self, *largs):\n if self._coreimage:\n self.texture = self._coreimage.texture\n", "path": "kivy/uix/image.py"}]} | 2,985 | 124 |
gh_patches_debug_14311 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-6264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Monitoring: where is CallOptions on monitoring API example?
[OS] macOS Sierra 10.12.6
[Versions]
- Python 3.6.1
```
google-api-core==1.2.1
google-api-python-client==1.7.3
google-auth==1.5.0
google-auth-httplib2==0.0.3
google-cloud-monitoring==0.30.0
googleapis-common-protos==1.5.3
```
----
## CallOptions class was not found!
Hi. I'm new to GCP and Stackdriver. I wanted to use Google Kubernetes Engine with autoscaling driven by custom metrics, which requires exporting those metrics to Stackdriver Monitoring, so that is what I am trying to do.
But after installing the above-mentioned libraries, the example code in the monitoring API README failed. The pitfall is that `CallOptions` was not found, so I searched for it in this repository and some other repositories.
And finally, I couldn't find it...
`CallOptions` is defined in gax.python, but that package is deprecated and has moved to google-api-core. So I guess that either the dependency is currently broken or some of the examples are out of date.
Please tell me how to handle this problem.
_Thank you for the great package and platform._
--- END ISSUE ---
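For context: in the google-api-core based clients, the per-call settings that gax `CallOptions` used to carry are passed as plain `retry`/`timeout` keyword arguments on each method call. A minimal sketch, assuming the `annotate_image` helper shown in the files below (the client class and argument values are illustrative only):

```
from google.api_core.retry import Retry
from google.cloud.vision_v1 import ImageAnnotatorClient

client = ImageAnnotatorClient()
request = {"image": {"source": {"image_uri": "https://foo.com/image.jpg"}}}
# retry/timeout replace the old gax CallOptions knobs; there is no CallOptions object here
response = client.annotate_image(request, retry=Retry(), timeout=10.0)
```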
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vision/google/cloud/vision_helpers/__init__.py`
Content:
```
1 # Copyright 2017, Google LLC All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from __future__ import absolute_import
16 import io
17
18 from google.api_core import protobuf_helpers as protobuf
19
20
21 class VisionHelpers(object):
22 """A set of convenience methods to make the Vision GAPIC easier to use.
23
24 This class should be considered abstract; it is used as a superclass
25 in a multiple-inheritance construction alongside the applicable GAPIC.
26 See the :class:`~google.cloud.vision_v1.ImageAnnotatorClient`.
27 """
28 def annotate_image(self, request, retry=None, timeout=None):
29 """Run image detection and annotation for an image.
30
31 Example:
32 >>> from google.cloud.vision_v1 import ImageAnnotatorClient
33 >>> client = ImageAnnotatorClient()
34 >>> request = {
35 ... 'image': {
36 ... 'source': {'image_uri': 'https://foo.com/image.jpg'},
37 ... },
38 ... }
39 >>> response = client.annotate_image(request)
40
41 Args:
42 request (:class:`~.vision_v1.types.AnnotateImageRequest`)
43 options (:class:`google.gax.CallOptions`): Overrides the default
44 settings for this call, e.g, timeout, retries, etc.
45
46 Returns:
47 :class:`~.vision_v1.types.AnnotateImageResponse` The API response.
48 """
49 # If the image is a file handler, set the content.
50 image = protobuf.get(request, 'image')
51 if hasattr(image, 'read'):
52 img_bytes = image.read()
53 protobuf.set(request, 'image', {})
54 protobuf.set(request, 'image.content', img_bytes)
55 image = protobuf.get(request, 'image')
56
57 # If a filename is provided, read the file.
58 filename = protobuf.get(image, 'source.filename', default=None)
59 if filename:
60 with io.open(filename, 'rb') as img_file:
61 protobuf.set(request, 'image.content', img_file.read())
62 protobuf.set(request, 'image.source', None)
63
64 # This method allows features not to be specified, and you get all
65 # of them.
66 protobuf.setdefault(request, 'features', self._get_all_features())
67 r = self.batch_annotate_images([request], retry=retry, timeout=timeout)
68 return r.responses[0]
69
70 def _get_all_features(self):
71 """Return a list of all features.
72
73 Returns:
74 list: A list of all available features.
75 """
76 return [
77 {'type': feature}
78 for feature in self.enums.Feature.Type if feature != 0]
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vision/google/cloud/vision_helpers/__init__.py b/vision/google/cloud/vision_helpers/__init__.py
--- a/vision/google/cloud/vision_helpers/__init__.py
+++ b/vision/google/cloud/vision_helpers/__init__.py
@@ -40,8 +40,12 @@
Args:
request (:class:`~.vision_v1.types.AnnotateImageRequest`)
- options (:class:`google.gax.CallOptions`): Overrides the default
- settings for this call, e.g, timeout, retries, etc.
+ retry (Optional[google.api_core.retry.Retry]): A retry object used
+ to retry requests. If ``None`` is specified, requests will not
+ be retried.
+ timeout (Optional[float]): The amount of time, in seconds, to wait
+ for the request to complete. Note that if ``retry`` is
+ specified, the timeout applies to each individual attempt.
Returns:
:class:`~.vision_v1.types.AnnotateImageResponse` The API response.
| {"golden_diff": "diff --git a/vision/google/cloud/vision_helpers/__init__.py b/vision/google/cloud/vision_helpers/__init__.py\n--- a/vision/google/cloud/vision_helpers/__init__.py\n+++ b/vision/google/cloud/vision_helpers/__init__.py\n@@ -40,8 +40,12 @@\n \n Args:\n request (:class:`~.vision_v1.types.AnnotateImageRequest`)\n- options (:class:`google.gax.CallOptions`): Overrides the default\n- settings for this call, e.g, timeout, retries, etc.\n+ retry (Optional[google.api_core.retry.Retry]): A retry object used\n+ to retry requests. If ``None`` is specified, requests will not\n+ be retried.\n+ timeout (Optional[float]): The amount of time, in seconds, to wait\n+ for the request to complete. Note that if ``retry`` is\n+ specified, the timeout applies to each individual attempt.\n \n Returns:\n :class:`~.vision_v1.types.AnnotateImageResponse` The API response.\n", "issue": "Monitoring: where is CallOptions on monitoring API example?\n[OS] macOS Sierra 10.12.6\r\n[Versions]\r\n\r\n- Python 3.6.1\r\n\r\n```\r\ngoogle-api-core==1.2.1\r\ngoogle-api-python-client==1.7.3\r\ngoogle-auth==1.5.0\r\ngoogle-auth-httplib2==0.0.3\r\ngoogle-cloud-monitoring==0.30.0\r\ngoogleapis-common-protos==1.5.3\r\n```\r\n\r\n----\r\n\r\n## CallOptions class was not found!\r\n\r\nHi. I'm new to GCP and Stackdriver. I wanted to use Google Kubernetes Engine and its auto scaling by custom metrics. Then, it is required to export the metrics to Stackdriver Monitoring, so I am trying to do it.\r\n\r\nBut, After installing above-mentioned libraries, the example code on monitoring API README document failed. The pit hole is that `CallOptions` was not found, thus I've searched it in this repository and some other repositories.\r\n\r\nAnd finally, I couldn't find it...\r\n\r\n`CallOptions` is defined in gax.python, but the package is currently deprecated and moved to google-api-core. So I guess that also the dependency is currently corrupted or the some examples are out-of-date.\r\n\r\nPlease tell me how handle this problem.\r\n\r\n_Thank you for the great package and platform._\n", "before_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport io\n\nfrom google.api_core import protobuf_helpers as protobuf\n\n\nclass VisionHelpers(object):\n \"\"\"A set of convenience methods to make the Vision GAPIC easier to use.\n\n This class should be considered abstract; it is used as a superclass\n in a multiple-inheritance construction alongside the applicable GAPIC.\n See the :class:`~google.cloud.vision_v1.ImageAnnotatorClient`.\n \"\"\"\n def annotate_image(self, request, retry=None, timeout=None):\n \"\"\"Run image detection and annotation for an image.\n\n Example:\n >>> from google.cloud.vision_v1 import ImageAnnotatorClient\n >>> client = ImageAnnotatorClient()\n >>> request = {\n ... 'image': {\n ... 'source': {'image_uri': 'https://foo.com/image.jpg'},\n ... },\n ... 
}\n >>> response = client.annotate_image(request)\n\n Args:\n request (:class:`~.vision_v1.types.AnnotateImageRequest`)\n options (:class:`google.gax.CallOptions`): Overrides the default\n settings for this call, e.g, timeout, retries, etc.\n\n Returns:\n :class:`~.vision_v1.types.AnnotateImageResponse` The API response.\n \"\"\"\n # If the image is a file handler, set the content.\n image = protobuf.get(request, 'image')\n if hasattr(image, 'read'):\n img_bytes = image.read()\n protobuf.set(request, 'image', {})\n protobuf.set(request, 'image.content', img_bytes)\n image = protobuf.get(request, 'image')\n\n # If a filename is provided, read the file.\n filename = protobuf.get(image, 'source.filename', default=None)\n if filename:\n with io.open(filename, 'rb') as img_file:\n protobuf.set(request, 'image.content', img_file.read())\n protobuf.set(request, 'image.source', None)\n\n # This method allows features not to be specified, and you get all\n # of them.\n protobuf.setdefault(request, 'features', self._get_all_features())\n r = self.batch_annotate_images([request], retry=retry, timeout=timeout)\n return r.responses[0]\n\n def _get_all_features(self):\n \"\"\"Return a list of all features.\n\n Returns:\n list: A list of all available features.\n \"\"\"\n return [\n {'type': feature}\n for feature in self.enums.Feature.Type if feature != 0]\n", "path": "vision/google/cloud/vision_helpers/__init__.py"}], "after_files": [{"content": "# Copyright 2017, Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nimport io\n\nfrom google.api_core import protobuf_helpers as protobuf\n\n\nclass VisionHelpers(object):\n \"\"\"A set of convenience methods to make the Vision GAPIC easier to use.\n\n This class should be considered abstract; it is used as a superclass\n in a multiple-inheritance construction alongside the applicable GAPIC.\n See the :class:`~google.cloud.vision_v1.ImageAnnotatorClient`.\n \"\"\"\n def annotate_image(self, request, retry=None, timeout=None):\n \"\"\"Run image detection and annotation for an image.\n\n Example:\n >>> from google.cloud.vision_v1 import ImageAnnotatorClient\n >>> client = ImageAnnotatorClient()\n >>> request = {\n ... 'image': {\n ... 'source': {'image_uri': 'https://foo.com/image.jpg'},\n ... },\n ... }\n >>> response = client.annotate_image(request)\n\n Args:\n request (:class:`~.vision_v1.types.AnnotateImageRequest`)\n retry (Optional[google.api_core.retry.Retry]): A retry object used\n to retry requests. If ``None`` is specified, requests will not\n be retried.\n timeout (Optional[float]): The amount of time, in seconds, to wait\n for the request to complete. 
Note that if ``retry`` is\n specified, the timeout applies to each individual attempt.\n\n Returns:\n :class:`~.vision_v1.types.AnnotateImageResponse` The API response.\n \"\"\"\n # If the image is a file handler, set the content.\n image = protobuf.get(request, 'image')\n if hasattr(image, 'read'):\n img_bytes = image.read()\n protobuf.set(request, 'image', {})\n protobuf.set(request, 'image.content', img_bytes)\n image = protobuf.get(request, 'image')\n\n # If a filename is provided, read the file.\n filename = protobuf.get(image, 'source.filename', default=None)\n if filename:\n with io.open(filename, 'rb') as img_file:\n protobuf.set(request, 'image.content', img_file.read())\n protobuf.set(request, 'image.source', None)\n\n # This method allows features not to be specified, and you get all\n # of them.\n protobuf.setdefault(request, 'features', self._get_all_features())\n r = self.batch_annotate_images([request], retry=retry, timeout=timeout)\n return r.responses[0]\n\n def _get_all_features(self):\n \"\"\"Return a list of all features.\n\n Returns:\n list: A list of all available features.\n \"\"\"\n return [\n {'type': feature}\n for feature in self.enums.Feature.Type if feature != 0]\n", "path": "vision/google/cloud/vision_helpers/__init__.py"}]} | 1,362 | 233 |
gh_patches_debug_34678 | rasdani/github-patches | git_diff | easybuilders__easybuild-easyblocks-2903 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
meson/ninja scipy install not producing egg / dist info
And that means it does not appear in the result of `pip list` and causes anything requiring `scipy` to fail the sanity check.
--- END ISSUE ---
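The symptom is easy to check from Python: the import works because the package files are on `sys.path`, but no `*.dist-info`/`*.egg-info` directory was created, so packaging tools cannot see the installation. A minimal sketch (illustration only, assuming such a scipy installation is active):

```
import importlib.metadata as md

import scipy                      # succeeds: the files themselves are importable
print(scipy.__version__)

try:
    print(md.version("scipy"))    # raises when no dist-info/egg-info metadata exists
except md.PackageNotFoundError:
    print("scipy is importable but has no packaging metadata")
```

`pip list` relies on the same metadata, which is why the sanity check of anything depending on scipy fails.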
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `easybuild/easyblocks/s/scipy.py`
Content:
```
1 ##
2 # Copyright 2009-2023 Ghent University
3 #
4 # This file is part of EasyBuild,
5 # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
6 # with support of Ghent University (http://ugent.be/hpc),
7 # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
8 # Flemish Research Foundation (FWO) (http://www.fwo.be/en)
9 # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
10 #
11 # https://github.com/easybuilders/easybuild
12 #
13 # EasyBuild is free software: you can redistribute it and/or modify
14 # it under the terms of the GNU General Public License as published by
15 # the Free Software Foundation v2.
16 #
17 # EasyBuild is distributed in the hope that it will be useful,
18 # but WITHOUT ANY WARRANTY; without even the implied warranty of
19 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 # GNU General Public License for more details.
21 #
22 # You should have received a copy of the GNU General Public License
23 # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
24 ##
25 """
26 EasyBuild support for building and installing scipy, implemented as an easyblock
27
28 @author: Stijn De Weirdt (Ghent University)
29 @author: Dries Verdegem (Ghent University)
30 @author: Kenneth Hoste (Ghent University)
31 @author: Pieter De Baets (Ghent University)
32 @author: Jens Timmerman (Ghent University)
33 @author: Jasper Grimm (University of York)
34 """
35 import os
36 import tempfile
37 from distutils.version import LooseVersion
38
39 import easybuild.tools.environment as env
40 import easybuild.tools.toolchain as toolchain
41 from easybuild.easyblocks.generic.fortranpythonpackage import FortranPythonPackage
42 from easybuild.easyblocks.generic.mesonninja import MesonNinja
43 from easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_pylibdir
44 from easybuild.framework.easyconfig import CUSTOM
45 from easybuild.tools.build_log import EasyBuildError
46 from easybuild.tools.filetools import change_dir, copy_dir
47
48
49 class EB_scipy(FortranPythonPackage, PythonPackage, MesonNinja):
50 """Support for installing the scipy Python package as part of a Python installation."""
51
52 @staticmethod
53 def extra_options():
54 """Easyconfig parameters specific to scipy."""
55 extra_vars = ({
56 'enable_slow_tests': [False, "Run scipy test suite, including tests marked as slow", CUSTOM],
57 'ignore_test_result': [None, "Run scipy test suite, but ignore test failures (True/False/None). Default "
58 "(None) implies True for scipy < 1.9, and False for scipy >= 1.9", CUSTOM],
59 })
60
61 return PythonPackage.extra_options(extra_vars=extra_vars)
62
63 def __init__(self, *args, **kwargs):
64 """Set scipy-specific test command."""
65 # calling PythonPackage __init__ also lets MesonNinja work in an extension
66 PythonPackage.__init__(self, *args, **kwargs)
67 self.testinstall = True
68
69 if LooseVersion(self.version) >= LooseVersion('1.9'):
70 self.use_meson = True
71
72 # enforce scipy test suite results if not explicitly disabled for scipy >= 1.9
73 # strip inherited PythonPackage installopts
74 installopts = self.cfg['installopts']
75 pythonpackage_installopts = ['--no-deps', '--ignore-installed', '--no-index', '--egg',
76 '--zip-ok', '--no-index']
77 self.log.info("Stripping inherited PythonPackage installopts %s from installopts %s",
78 pythonpackage_installopts, installopts)
79 for i in pythonpackage_installopts:
80 installopts = installopts.replace(i, '')
81 self.cfg['installopts'] = installopts
82
83 else:
84 self.use_meson = False
85
86 if self.cfg['ignore_test_result'] is None:
87 # automatically ignore scipy test suite results for scipy < 1.9, as we did in older easyconfigs
88 self.cfg['ignore_test_result'] = LooseVersion(self.version) < '1.9'
89 self.log.info("ignore_test_result not specified, so automatically set to %s for scipy %s",
90 self.cfg['ignore_test_result'], self.version)
91
92 if self.cfg['ignore_test_result']:
93 # used to maintain compatibility with easyconfigs predating scipy 1.9;
94 # runs tests (serially) in a way that exits with code 0 regardless of test results,
95 # see https://github.com/easybuilders/easybuild-easyblocks/issues/2237
96 self.testcmd = "cd .. && %(python)s -c 'import numpy; import scipy; scipy.test(verbose=2)'"
97 else:
98 self.testcmd = " && ".join([
99 "cd ..",
100 "touch %(srcdir)s/.coveragerc",
101 "%(python)s %(srcdir)s/runtests.py -v --no-build --parallel %(parallel)s",
102 ])
103 if self.cfg['enable_slow_tests']:
104 self.testcmd += " -m full "
105
106 def configure_step(self):
107 """Custom configure step for scipy: set extra installation options when needed."""
108
109 # scipy >= 1.9.0 uses Meson/Ninja
110 if self.use_meson:
111 # configure BLAS/LAPACK library to use with Meson for scipy >= 1.9.0
112 lapack_lib = self.toolchain.lapack_family()
113 if lapack_lib == toolchain.FLEXIBLAS:
114 blas_lapack = 'flexiblas'
115 elif lapack_lib == toolchain.INTELMKL:
116 blas_lapack = 'mkl'
117 elif lapack_lib == toolchain.OPENBLAS:
118 blas_lapack = 'openblas'
119 else:
120 raise EasyBuildError("Unknown BLAS/LAPACK library used: %s", lapack_lib)
121
122 for opt in ('blas', 'lapack'):
123 self.cfg.update('configopts', '-D%(opt)s=%(blas_lapack)s' % {'opt': opt, 'blas_lapack': blas_lapack})
124
125 # need to have already installed extensions in PATH, PYTHONPATH for configure/build/install steps
126 pythonpath = os.getenv('PYTHONPATH')
127 pylibdir = det_pylibdir()
128 env.setvar('PYTHONPATH', os.pathsep.join([os.path.join(self.installdir, pylibdir), pythonpath]))
129
130 path = os.getenv('PATH')
131 env.setvar('PATH', os.pathsep.join([os.path.join(self.installdir, 'bin'), path]))
132
133 MesonNinja.configure_step(self)
134
135 else:
136 # scipy < 1.9.0 uses install procedure using setup.py
137 FortranPythonPackage.configure_step(self)
138
139 if LooseVersion(self.version) >= LooseVersion('0.13'):
140 # in recent scipy versions, additional compilation is done in the install step,
141 # which requires unsetting $LDFLAGS
142 if self.toolchain.comp_family() in [toolchain.GCC, toolchain.CLANGGCC]: # @UndefinedVariable
143 self.cfg.update('preinstallopts', "unset LDFLAGS && ")
144
145 def build_step(self):
146 """Custom build step for scipy: use ninja for scipy >= 1.9.0"""
147 if self.use_meson:
148 MesonNinja.build_step(self)
149 else:
150 FortranPythonPackage.build_step(self)
151
152 def test_step(self):
153 """Run available scipy unit tests. Adapted from numpy easyblock"""
154
155 if self.use_meson:
156 # temporarily install scipy so we can run the test suite
157 tmpdir = tempfile.mkdtemp()
158 cwd = os.getcwd()
159
160 tmp_builddir = os.path.join(tmpdir, 'build')
161 tmp_installdir = os.path.join(tmpdir, 'install')
162
163 # create a copy of the builddir
164 copy_dir(cwd, tmp_builddir)
165 change_dir(tmp_builddir)
166
167 # reconfigure (to update prefix), and install to tmpdir
168 MesonNinja.configure_step(self, cmd_prefix=tmp_installdir)
169 MesonNinja.install_step(self)
170
171 tmp_pylibdir = [os.path.join(tmp_installdir, det_pylibdir())]
172 self.prepare_python()
173
174 self.cfg['pretestopts'] = " && ".join([
175 # LDFLAGS should not be set when testing numpy/scipy, because it overwrites whatever numpy/scipy sets
176 # see http://projects.scipy.org/numpy/ticket/182
177 "unset LDFLAGS",
178 "export PYTHONPATH=%s:$PYTHONPATH" % tmp_pylibdir,
179 "",
180 ])
181 self.cfg['runtest'] = self.testcmd % {
182 'python': self.python_cmd,
183 'srcdir': self.cfg['start_dir'],
184 'parallel': self.cfg['parallel'],
185 }
186
187 MesonNinja.test_step(self)
188
189 else:
190 self.testcmd = self.testcmd % {
191 'python': '%(python)s',
192 'srcdir': self.cfg['start_dir'],
193 'parallel': self.cfg['parallel'],
194 }
195 FortranPythonPackage.test_step(self)
196
197 def install_step(self):
198 """Custom install step for scipy: use ninja for scipy >= 1.9.0"""
199 if self.use_meson:
200 MesonNinja.install_step(self)
201 else:
202 FortranPythonPackage.install_step(self)
203
204 def sanity_check_step(self, *args, **kwargs):
205 """Custom sanity check for scipy."""
206
207 # can't use self.pylibdir here, need to determine path on the fly using currently active 'python' command;
208 # this is important for numpy installations for multiple Python version (via multi_deps)
209 custom_paths = {
210 'files': [],
211 'dirs': [det_pylibdir()],
212 }
213
214 return PythonPackage.sanity_check_step(self, custom_paths=custom_paths)
215
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/easybuild/easyblocks/s/scipy.py b/easybuild/easyblocks/s/scipy.py
--- a/easybuild/easyblocks/s/scipy.py
+++ b/easybuild/easyblocks/s/scipy.py
@@ -42,8 +42,8 @@
from easybuild.easyblocks.generic.mesonninja import MesonNinja
from easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_pylibdir
from easybuild.framework.easyconfig import CUSTOM
-from easybuild.tools.build_log import EasyBuildError
-from easybuild.tools.filetools import change_dir, copy_dir
+from easybuild.tools.build_log import EasyBuildError, print_warning
+from easybuild.tools.filetools import change_dir, copy_dir, copy_file
class EB_scipy(FortranPythonPackage, PythonPackage, MesonNinja):
@@ -198,6 +198,18 @@
"""Custom install step for scipy: use ninja for scipy >= 1.9.0"""
if self.use_meson:
MesonNinja.install_step(self)
+
+ # copy PKG-INFO file included in scipy source tarball to scipy-<version>.egg-info in installation,
+ # so pip is aware of the scipy installation (required for 'pip list', 'pip check', etc.);
+ # see also https://github.com/easybuilders/easybuild-easyblocks/issues/2901
+ pkg_info = os.path.join(self.cfg['start_dir'], 'PKG-INFO')
+ target_egg_info = os.path.join(self.installdir, self.pylibdir, 'scipy-%s.egg-info' % self.version)
+ if os.path.isfile(pkg_info):
+ copy_file(pkg_info, target_egg_info)
+ else:
+ cwd = os.getcwd()
+ print_warning("%s not found in %s, so can't use it to create %s!", pkg_info, cwd, target_egg_info,
+ log=self.log)
else:
FortranPythonPackage.install_step(self)
@@ -211,4 +223,9 @@
'dirs': [det_pylibdir()],
}
- return PythonPackage.sanity_check_step(self, custom_paths=custom_paths)
+ # make sure that scipy is included in output of 'pip list',
+ # so that 'pip check' passes if scipy is a required dependency for another Python package;
+ # use case-insensitive match, since name is sometimes reported as 'SciPy'
+ custom_commands = [r"pip list | grep -iE '^scipy\s+%s\s*$'" % self.version.replace('.', r'\.')]
+
+ return PythonPackage.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands)
| {"golden_diff": "diff --git a/easybuild/easyblocks/s/scipy.py b/easybuild/easyblocks/s/scipy.py\n--- a/easybuild/easyblocks/s/scipy.py\n+++ b/easybuild/easyblocks/s/scipy.py\n@@ -42,8 +42,8 @@\n from easybuild.easyblocks.generic.mesonninja import MesonNinja\n from easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_pylibdir\n from easybuild.framework.easyconfig import CUSTOM\n-from easybuild.tools.build_log import EasyBuildError\n-from easybuild.tools.filetools import change_dir, copy_dir\n+from easybuild.tools.build_log import EasyBuildError, print_warning\n+from easybuild.tools.filetools import change_dir, copy_dir, copy_file\n \n \n class EB_scipy(FortranPythonPackage, PythonPackage, MesonNinja):\n@@ -198,6 +198,18 @@\n \"\"\"Custom install step for scipy: use ninja for scipy >= 1.9.0\"\"\"\n if self.use_meson:\n MesonNinja.install_step(self)\n+\n+ # copy PKG-INFO file included in scipy source tarball to scipy-<version>.egg-info in installation,\n+ # so pip is aware of the scipy installation (required for 'pip list', 'pip check', etc.);\n+ # see also https://github.com/easybuilders/easybuild-easyblocks/issues/2901\n+ pkg_info = os.path.join(self.cfg['start_dir'], 'PKG-INFO')\n+ target_egg_info = os.path.join(self.installdir, self.pylibdir, 'scipy-%s.egg-info' % self.version)\n+ if os.path.isfile(pkg_info):\n+ copy_file(pkg_info, target_egg_info)\n+ else:\n+ cwd = os.getcwd()\n+ print_warning(\"%s not found in %s, so can't use it to create %s!\", pkg_info, cwd, target_egg_info,\n+ log=self.log)\n else:\n FortranPythonPackage.install_step(self)\n \n@@ -211,4 +223,9 @@\n 'dirs': [det_pylibdir()],\n }\n \n- return PythonPackage.sanity_check_step(self, custom_paths=custom_paths)\n+ # make sure that scipy is included in output of 'pip list',\n+ # so that 'pip check' passes if scipy is a required dependency for another Python package;\n+ # use case-insensitive match, since name is sometimes reported as 'SciPy'\n+ custom_commands = [r\"pip list | grep -iE '^scipy\\s+%s\\s*$'\" % self.version.replace('.', r'\\.')]\n+\n+ return PythonPackage.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands)\n", "issue": "meson/ninja scipy install not producing egg / dist info \nAnd that means it does not appear in the result of `pip list` and causes anything requiring `scipy` to fail the sanity check.\n", "before_files": [{"content": "##\n# Copyright 2009-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing scipy, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Jasper Grimm (University of York)\n\"\"\"\nimport os\nimport tempfile\nfrom distutils.version import LooseVersion\n\nimport easybuild.tools.environment as env\nimport easybuild.tools.toolchain as toolchain\nfrom easybuild.easyblocks.generic.fortranpythonpackage import FortranPythonPackage\nfrom easybuild.easyblocks.generic.mesonninja import MesonNinja\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_pylibdir\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import change_dir, copy_dir\n\n\nclass EB_scipy(FortranPythonPackage, PythonPackage, MesonNinja):\n \"\"\"Support for installing the scipy Python package as part of a Python installation.\"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Easyconfig parameters specific to scipy.\"\"\"\n extra_vars = ({\n 'enable_slow_tests': [False, \"Run scipy test suite, including tests marked as slow\", CUSTOM],\n 'ignore_test_result': [None, \"Run scipy test suite, but ignore test failures (True/False/None). Default \"\n \"(None) implies True for scipy < 1.9, and False for scipy >= 1.9\", CUSTOM],\n })\n\n return PythonPackage.extra_options(extra_vars=extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Set scipy-specific test command.\"\"\"\n # calling PythonPackage __init__ also lets MesonNinja work in an extension\n PythonPackage.__init__(self, *args, **kwargs)\n self.testinstall = True\n\n if LooseVersion(self.version) >= LooseVersion('1.9'):\n self.use_meson = True\n\n # enforce scipy test suite results if not explicitly disabled for scipy >= 1.9\n # strip inherited PythonPackage installopts\n installopts = self.cfg['installopts']\n pythonpackage_installopts = ['--no-deps', '--ignore-installed', '--no-index', '--egg',\n '--zip-ok', '--no-index']\n self.log.info(\"Stripping inherited PythonPackage installopts %s from installopts %s\",\n pythonpackage_installopts, installopts)\n for i in pythonpackage_installopts:\n installopts = installopts.replace(i, '')\n self.cfg['installopts'] = installopts\n\n else:\n self.use_meson = False\n\n if self.cfg['ignore_test_result'] is None:\n # automatically ignore scipy test suite results for scipy < 1.9, as we did in older easyconfigs\n self.cfg['ignore_test_result'] = LooseVersion(self.version) < '1.9'\n self.log.info(\"ignore_test_result not specified, so automatically set to %s for scipy %s\",\n self.cfg['ignore_test_result'], self.version)\n\n if self.cfg['ignore_test_result']:\n # used to maintain compatibility with easyconfigs predating scipy 1.9;\n # runs tests (serially) in a way that exits with code 0 regardless of test results,\n # see https://github.com/easybuilders/easybuild-easyblocks/issues/2237\n self.testcmd = \"cd .. 
&& %(python)s -c 'import numpy; import scipy; scipy.test(verbose=2)'\"\n else:\n self.testcmd = \" && \".join([\n \"cd ..\",\n \"touch %(srcdir)s/.coveragerc\",\n \"%(python)s %(srcdir)s/runtests.py -v --no-build --parallel %(parallel)s\",\n ])\n if self.cfg['enable_slow_tests']:\n self.testcmd += \" -m full \"\n\n def configure_step(self):\n \"\"\"Custom configure step for scipy: set extra installation options when needed.\"\"\"\n\n # scipy >= 1.9.0 uses Meson/Ninja\n if self.use_meson:\n # configure BLAS/LAPACK library to use with Meson for scipy >= 1.9.0\n lapack_lib = self.toolchain.lapack_family()\n if lapack_lib == toolchain.FLEXIBLAS:\n blas_lapack = 'flexiblas'\n elif lapack_lib == toolchain.INTELMKL:\n blas_lapack = 'mkl'\n elif lapack_lib == toolchain.OPENBLAS:\n blas_lapack = 'openblas'\n else:\n raise EasyBuildError(\"Unknown BLAS/LAPACK library used: %s\", lapack_lib)\n\n for opt in ('blas', 'lapack'):\n self.cfg.update('configopts', '-D%(opt)s=%(blas_lapack)s' % {'opt': opt, 'blas_lapack': blas_lapack})\n\n # need to have already installed extensions in PATH, PYTHONPATH for configure/build/install steps\n pythonpath = os.getenv('PYTHONPATH')\n pylibdir = det_pylibdir()\n env.setvar('PYTHONPATH', os.pathsep.join([os.path.join(self.installdir, pylibdir), pythonpath]))\n\n path = os.getenv('PATH')\n env.setvar('PATH', os.pathsep.join([os.path.join(self.installdir, 'bin'), path]))\n\n MesonNinja.configure_step(self)\n\n else:\n # scipy < 1.9.0 uses install procedure using setup.py\n FortranPythonPackage.configure_step(self)\n\n if LooseVersion(self.version) >= LooseVersion('0.13'):\n # in recent scipy versions, additional compilation is done in the install step,\n # which requires unsetting $LDFLAGS\n if self.toolchain.comp_family() in [toolchain.GCC, toolchain.CLANGGCC]: # @UndefinedVariable\n self.cfg.update('preinstallopts', \"unset LDFLAGS && \")\n\n def build_step(self):\n \"\"\"Custom build step for scipy: use ninja for scipy >= 1.9.0\"\"\"\n if self.use_meson:\n MesonNinja.build_step(self)\n else:\n FortranPythonPackage.build_step(self)\n\n def test_step(self):\n \"\"\"Run available scipy unit tests. 
Adapted from numpy easyblock\"\"\"\n\n if self.use_meson:\n # temporarily install scipy so we can run the test suite\n tmpdir = tempfile.mkdtemp()\n cwd = os.getcwd()\n\n tmp_builddir = os.path.join(tmpdir, 'build')\n tmp_installdir = os.path.join(tmpdir, 'install')\n\n # create a copy of the builddir\n copy_dir(cwd, tmp_builddir)\n change_dir(tmp_builddir)\n\n # reconfigure (to update prefix), and install to tmpdir\n MesonNinja.configure_step(self, cmd_prefix=tmp_installdir)\n MesonNinja.install_step(self)\n\n tmp_pylibdir = [os.path.join(tmp_installdir, det_pylibdir())]\n self.prepare_python()\n\n self.cfg['pretestopts'] = \" && \".join([\n # LDFLAGS should not be set when testing numpy/scipy, because it overwrites whatever numpy/scipy sets\n # see http://projects.scipy.org/numpy/ticket/182\n \"unset LDFLAGS\",\n \"export PYTHONPATH=%s:$PYTHONPATH\" % tmp_pylibdir,\n \"\",\n ])\n self.cfg['runtest'] = self.testcmd % {\n 'python': self.python_cmd,\n 'srcdir': self.cfg['start_dir'],\n 'parallel': self.cfg['parallel'],\n }\n\n MesonNinja.test_step(self)\n\n else:\n self.testcmd = self.testcmd % {\n 'python': '%(python)s',\n 'srcdir': self.cfg['start_dir'],\n 'parallel': self.cfg['parallel'],\n }\n FortranPythonPackage.test_step(self)\n\n def install_step(self):\n \"\"\"Custom install step for scipy: use ninja for scipy >= 1.9.0\"\"\"\n if self.use_meson:\n MesonNinja.install_step(self)\n else:\n FortranPythonPackage.install_step(self)\n\n def sanity_check_step(self, *args, **kwargs):\n \"\"\"Custom sanity check for scipy.\"\"\"\n\n # can't use self.pylibdir here, need to determine path on the fly using currently active 'python' command;\n # this is important for numpy installations for multiple Python version (via multi_deps)\n custom_paths = {\n 'files': [],\n 'dirs': [det_pylibdir()],\n }\n\n return PythonPackage.sanity_check_step(self, custom_paths=custom_paths)\n", "path": "easybuild/easyblocks/s/scipy.py"}], "after_files": [{"content": "##\n# Copyright 2009-2023 Ghent University\n#\n# This file is part of EasyBuild,\n# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),\n# with support of Ghent University (http://ugent.be/hpc),\n# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),\n# Flemish Research Foundation (FWO) (http://www.fwo.be/en)\n# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).\n#\n# https://github.com/easybuilders/easybuild\n#\n# EasyBuild is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation v2.\n#\n# EasyBuild is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with EasyBuild. 
If not, see <http://www.gnu.org/licenses/>.\n##\n\"\"\"\nEasyBuild support for building and installing scipy, implemented as an easyblock\n\n@author: Stijn De Weirdt (Ghent University)\n@author: Dries Verdegem (Ghent University)\n@author: Kenneth Hoste (Ghent University)\n@author: Pieter De Baets (Ghent University)\n@author: Jens Timmerman (Ghent University)\n@author: Jasper Grimm (University of York)\n\"\"\"\nimport os\nimport tempfile\nfrom distutils.version import LooseVersion\n\nimport easybuild.tools.environment as env\nimport easybuild.tools.toolchain as toolchain\nfrom easybuild.easyblocks.generic.fortranpythonpackage import FortranPythonPackage\nfrom easybuild.easyblocks.generic.mesonninja import MesonNinja\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage, det_pylibdir\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError, print_warning\nfrom easybuild.tools.filetools import change_dir, copy_dir, copy_file\n\n\nclass EB_scipy(FortranPythonPackage, PythonPackage, MesonNinja):\n \"\"\"Support for installing the scipy Python package as part of a Python installation.\"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Easyconfig parameters specific to scipy.\"\"\"\n extra_vars = ({\n 'enable_slow_tests': [False, \"Run scipy test suite, including tests marked as slow\", CUSTOM],\n 'ignore_test_result': [None, \"Run scipy test suite, but ignore test failures (True/False/None). Default \"\n \"(None) implies True for scipy < 1.9, and False for scipy >= 1.9\", CUSTOM],\n })\n\n return PythonPackage.extra_options(extra_vars=extra_vars)\n\n def __init__(self, *args, **kwargs):\n \"\"\"Set scipy-specific test command.\"\"\"\n # calling PythonPackage __init__ also lets MesonNinja work in an extension\n PythonPackage.__init__(self, *args, **kwargs)\n self.testinstall = True\n\n if LooseVersion(self.version) >= LooseVersion('1.9'):\n self.use_meson = True\n\n # enforce scipy test suite results if not explicitly disabled for scipy >= 1.9\n # strip inherited PythonPackage installopts\n installopts = self.cfg['installopts']\n pythonpackage_installopts = ['--no-deps', '--ignore-installed', '--no-index', '--egg',\n '--zip-ok', '--no-index']\n self.log.info(\"Stripping inherited PythonPackage installopts %s from installopts %s\",\n pythonpackage_installopts, installopts)\n for i in pythonpackage_installopts:\n installopts = installopts.replace(i, '')\n self.cfg['installopts'] = installopts\n\n else:\n self.use_meson = False\n\n if self.cfg['ignore_test_result'] is None:\n # automatically ignore scipy test suite results for scipy < 1.9, as we did in older easyconfigs\n self.cfg['ignore_test_result'] = LooseVersion(self.version) < '1.9'\n self.log.info(\"ignore_test_result not specified, so automatically set to %s for scipy %s\",\n self.cfg['ignore_test_result'], self.version)\n\n if self.cfg['ignore_test_result']:\n # used to maintain compatibility with easyconfigs predating scipy 1.9;\n # runs tests (serially) in a way that exits with code 0 regardless of test results,\n # see https://github.com/easybuilders/easybuild-easyblocks/issues/2237\n self.testcmd = \"cd .. 
&& %(python)s -c 'import numpy; import scipy; scipy.test(verbose=2)'\"\n else:\n self.testcmd = \" && \".join([\n \"cd ..\",\n \"touch %(srcdir)s/.coveragerc\",\n \"%(python)s %(srcdir)s/runtests.py -v --no-build --parallel %(parallel)s\",\n ])\n if self.cfg['enable_slow_tests']:\n self.testcmd += \" -m full \"\n\n def configure_step(self):\n \"\"\"Custom configure step for scipy: set extra installation options when needed.\"\"\"\n\n # scipy >= 1.9.0 uses Meson/Ninja\n if self.use_meson:\n # configure BLAS/LAPACK library to use with Meson for scipy >= 1.9.0\n lapack_lib = self.toolchain.lapack_family()\n if lapack_lib == toolchain.FLEXIBLAS:\n blas_lapack = 'flexiblas'\n elif lapack_lib == toolchain.INTELMKL:\n blas_lapack = 'mkl'\n elif lapack_lib == toolchain.OPENBLAS:\n blas_lapack = 'openblas'\n else:\n raise EasyBuildError(\"Unknown BLAS/LAPACK library used: %s\", lapack_lib)\n\n for opt in ('blas', 'lapack'):\n self.cfg.update('configopts', '-D%(opt)s=%(blas_lapack)s' % {'opt': opt, 'blas_lapack': blas_lapack})\n\n # need to have already installed extensions in PATH, PYTHONPATH for configure/build/install steps\n pythonpath = os.getenv('PYTHONPATH')\n pylibdir = det_pylibdir()\n env.setvar('PYTHONPATH', os.pathsep.join([os.path.join(self.installdir, pylibdir), pythonpath]))\n\n path = os.getenv('PATH')\n env.setvar('PATH', os.pathsep.join([os.path.join(self.installdir, 'bin'), path]))\n\n MesonNinja.configure_step(self)\n\n else:\n # scipy < 1.9.0 uses install procedure using setup.py\n FortranPythonPackage.configure_step(self)\n\n if LooseVersion(self.version) >= LooseVersion('0.13'):\n # in recent scipy versions, additional compilation is done in the install step,\n # which requires unsetting $LDFLAGS\n if self.toolchain.comp_family() in [toolchain.GCC, toolchain.CLANGGCC]: # @UndefinedVariable\n self.cfg.update('preinstallopts', \"unset LDFLAGS && \")\n\n def build_step(self):\n \"\"\"Custom build step for scipy: use ninja for scipy >= 1.9.0\"\"\"\n if self.use_meson:\n MesonNinja.build_step(self)\n else:\n FortranPythonPackage.build_step(self)\n\n def test_step(self):\n \"\"\"Run available scipy unit tests. 
Adapted from numpy easyblock\"\"\"\n\n if self.use_meson:\n # temporarily install scipy so we can run the test suite\n tmpdir = tempfile.mkdtemp()\n cwd = os.getcwd()\n\n tmp_builddir = os.path.join(tmpdir, 'build')\n tmp_installdir = os.path.join(tmpdir, 'install')\n\n # create a copy of the builddir\n copy_dir(cwd, tmp_builddir)\n change_dir(tmp_builddir)\n\n # reconfigure (to update prefix), and install to tmpdir\n MesonNinja.configure_step(self, cmd_prefix=tmp_installdir)\n MesonNinja.install_step(self)\n\n tmp_pylibdir = [os.path.join(tmp_installdir, det_pylibdir())]\n self.prepare_python()\n\n self.cfg['pretestopts'] = \" && \".join([\n # LDFLAGS should not be set when testing numpy/scipy, because it overwrites whatever numpy/scipy sets\n # see http://projects.scipy.org/numpy/ticket/182\n \"unset LDFLAGS\",\n \"export PYTHONPATH=%s:$PYTHONPATH\" % tmp_pylibdir,\n \"\",\n ])\n self.cfg['runtest'] = self.testcmd % {\n 'python': self.python_cmd,\n 'srcdir': self.cfg['start_dir'],\n 'parallel': self.cfg['parallel'],\n }\n\n MesonNinja.test_step(self)\n\n else:\n self.testcmd = self.testcmd % {\n 'python': '%(python)s',\n 'srcdir': self.cfg['start_dir'],\n 'parallel': self.cfg['parallel'],\n }\n FortranPythonPackage.test_step(self)\n\n def install_step(self):\n \"\"\"Custom install step for scipy: use ninja for scipy >= 1.9.0\"\"\"\n if self.use_meson:\n MesonNinja.install_step(self)\n\n # copy PKG-INFO file included in scipy source tarball to scipy-<version>.egg-info in installation,\n # so pip is aware of the scipy installation (required for 'pip list', 'pip check', etc.);\n # see also https://github.com/easybuilders/easybuild-easyblocks/issues/2901\n pkg_info = os.path.join(self.cfg['start_dir'], 'PKG-INFO')\n target_egg_info = os.path.join(self.installdir, self.pylibdir, 'scipy-%s.egg-info' % self.version)\n if os.path.isfile(pkg_info):\n copy_file(pkg_info, target_egg_info)\n else:\n cwd = os.getcwd()\n print_warning(\"%s not found in %s, so can't use it to create %s!\", pkg_info, cwd, target_egg_info,\n log=self.log)\n else:\n FortranPythonPackage.install_step(self)\n\n def sanity_check_step(self, *args, **kwargs):\n \"\"\"Custom sanity check for scipy.\"\"\"\n\n # can't use self.pylibdir here, need to determine path on the fly using currently active 'python' command;\n # this is important for numpy installations for multiple Python version (via multi_deps)\n custom_paths = {\n 'files': [],\n 'dirs': [det_pylibdir()],\n }\n\n # make sure that scipy is included in output of 'pip list',\n # so that 'pip check' passes if scipy is a required dependency for another Python package;\n # use case-insensitive match, since name is sometimes reported as 'SciPy'\n custom_commands = [r\"pip list | grep -iE '^scipy\\s+%s\\s*$'\" % self.version.replace('.', r'\\.')]\n\n return PythonPackage.sanity_check_step(self, custom_paths=custom_paths, custom_commands=custom_commands)\n", "path": "easybuild/easyblocks/s/scipy.py"}]} | 3,045 | 601 |
gh_patches_debug_21937 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9228 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Concatenate/consolidate all algorithms with different implementations
### Feature description
There are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `maths/miller_rabin.py`
Content:
```
1 import random
2
3 from .binary_exp_mod import bin_exp_mod
4
5
6 # This is a probabilistic check to test primality, useful for big numbers!
7 # if it's a prime, it will return true
8 # if it's not a prime, the chance of it returning true is at most 1/4**prec
9 def is_prime_big(n, prec=1000):
10 """
11 >>> from maths.prime_check import is_prime
12 >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s
13 >>> all(is_prime_big(i) == is_prime(i) for i in range(256))
14 True
15 """
16 if n < 2:
17 return False
18
19 if n % 2 == 0:
20 return n == 2
21
22 # this means n is odd
23 d = n - 1
24 exp = 0
25 while d % 2 == 0:
26 d /= 2
27 exp += 1
28
29 # n - 1=d*(2**exp)
30 count = 0
31 while count < prec:
32 a = random.randint(2, n - 1)
33 b = bin_exp_mod(a, d, n)
34 if b != 1:
35 flag = True
36 for _ in range(exp):
37 if b == n - 1:
38 flag = False
39 break
40 b = b * b
41 b %= n
42 if flag:
43 return False
44 count += 1
45 return True
46
47
48 if __name__ == "__main__":
49 n = abs(int(input("Enter bound : ").strip()))
50 print("Here's the list of primes:")
51 print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py
deleted file mode 100644
--- a/maths/miller_rabin.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import random
-
-from .binary_exp_mod import bin_exp_mod
-
-
-# This is a probabilistic check to test primality, useful for big numbers!
-# if it's a prime, it will return true
-# if it's not a prime, the chance of it returning true is at most 1/4**prec
-def is_prime_big(n, prec=1000):
- """
- >>> from maths.prime_check import is_prime
- >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s
- >>> all(is_prime_big(i) == is_prime(i) for i in range(256))
- True
- """
- if n < 2:
- return False
-
- if n % 2 == 0:
- return n == 2
-
- # this means n is odd
- d = n - 1
- exp = 0
- while d % 2 == 0:
- d /= 2
- exp += 1
-
- # n - 1=d*(2**exp)
- count = 0
- while count < prec:
- a = random.randint(2, n - 1)
- b = bin_exp_mod(a, d, n)
- if b != 1:
- flag = True
- for _ in range(exp):
- if b == n - 1:
- flag = False
- break
- b = b * b
- b %= n
- if flag:
- return False
- count += 1
- return True
-
-
-if __name__ == "__main__":
- n = abs(int(input("Enter bound : ").strip()))
- print("Here's the list of primes:")
- print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
| {"golden_diff": "diff --git a/maths/miller_rabin.py b/maths/miller_rabin.py\ndeleted file mode 100644\n--- a/maths/miller_rabin.py\n+++ /dev/null\n@@ -1,51 +0,0 @@\n-import random\n-\n-from .binary_exp_mod import bin_exp_mod\n-\n-\n-# This is a probabilistic check to test primality, useful for big numbers!\n-# if it's a prime, it will return true\n-# if it's not a prime, the chance of it returning true is at most 1/4**prec\n-def is_prime_big(n, prec=1000):\n- \"\"\"\n- >>> from maths.prime_check import is_prime\n- >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s\n- >>> all(is_prime_big(i) == is_prime(i) for i in range(256))\n- True\n- \"\"\"\n- if n < 2:\n- return False\n-\n- if n % 2 == 0:\n- return n == 2\n-\n- # this means n is odd\n- d = n - 1\n- exp = 0\n- while d % 2 == 0:\n- d /= 2\n- exp += 1\n-\n- # n - 1=d*(2**exp)\n- count = 0\n- while count < prec:\n- a = random.randint(2, n - 1)\n- b = bin_exp_mod(a, d, n)\n- if b != 1:\n- flag = True\n- for _ in range(exp):\n- if b == n - 1:\n- flag = False\n- break\n- b = b * b\n- b %= n\n- if flag:\n- return False\n- count += 1\n- return True\n-\n-\n-if __name__ == \"__main__\":\n- n = abs(int(input(\"Enter bound : \").strip()))\n- print(\"Here's the list of primes:\")\n- print(\", \".join(str(i) for i in range(n + 1) if is_prime_big(i)))\n", "issue": "Concatenate/consolidate all algorithms with different implementations\n### Feature description\n\nThere are lots of algorithms with the same concept but different implementations/methods in different files. All these should be moved into one file\n", "before_files": [{"content": "import random\n\nfrom .binary_exp_mod import bin_exp_mod\n\n\n# This is a probabilistic check to test primality, useful for big numbers!\n# if it's a prime, it will return true\n# if it's not a prime, the chance of it returning true is at most 1/4**prec\ndef is_prime_big(n, prec=1000):\n \"\"\"\n >>> from maths.prime_check import is_prime\n >>> # all(is_prime_big(i) == is_prime(i) for i in range(1000)) # 3.45s\n >>> all(is_prime_big(i) == is_prime(i) for i in range(256))\n True\n \"\"\"\n if n < 2:\n return False\n\n if n % 2 == 0:\n return n == 2\n\n # this means n is odd\n d = n - 1\n exp = 0\n while d % 2 == 0:\n d /= 2\n exp += 1\n\n # n - 1=d*(2**exp)\n count = 0\n while count < prec:\n a = random.randint(2, n - 1)\n b = bin_exp_mod(a, d, n)\n if b != 1:\n flag = True\n for _ in range(exp):\n if b == n - 1:\n flag = False\n break\n b = b * b\n b %= n\n if flag:\n return False\n count += 1\n return True\n\n\nif __name__ == \"__main__\":\n n = abs(int(input(\"Enter bound : \").strip()))\n print(\"Here's the list of primes:\")\n print(\", \".join(str(i) for i in range(n + 1) if is_prime_big(i)))\n", "path": "maths/miller_rabin.py"}], "after_files": [{"content": null, "path": "maths/miller_rabin.py"}]} | 800 | 497 |
gh_patches_debug_18928 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-1030 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG]: GradAccumDataloader will ignore the last iter.
### 🐛 Describe the bug
I used a list as the dataloader for testing.
```
class GradAccumDataloader:
"""A wrapper for dataloader to enable gradient accumulation by dropping the last incomplete steps.
Note:
The dataloader would drop the last incomplete steps for gradient accumulation.
For example, if a dataloader has 10 batches of data and accumulate size is 4. The model parameters will
be updated only twice at step 4 and step 8. The last two batches of data do not form a complete 4-step cycle.
Thus, they will be automatically skipped by this class. If the dataloader is not standard PyTorch dataloader,
(e.g. Dali dataloader), this class will automatically consume (load data for nothing) the remaining 2 batches.
Args:
dataloader (``Iterable``): Your dataloader object for gradient accumulation.
accumulate_size (int): The number of steps to accumulate gradients.
"""
def __init__(self, dataloader: Iterable, accumulate_size: int) -> None:
self.dataloader = dataloader
self.consume_remain_data = True
self.steps_per_epoch = len(dataloader) - len(dataloader) % accumulate_size
def __getattr__(self, __name: str) -> Any:
return getattr(self.dataloader, __name)
def __len__(self) -> int:
return self.steps_per_epoch
def __iter__(self) -> Iterable:
self._cur_step = 0
self._dataiter = iter(self.dataloader)
return self
def __next__(self) -> Union[Tensor, Tuple[Tensor]]:
if self._cur_step < self.steps_per_epoch:
self._cur_step += 1
if self._cur_step == self.steps_per_epoch and self.consume_remain_data:
# this is to handle non standard pytorch dataloader
# such as dali dataloader
while True:
try:
_ = next(self._dataiter)
except StopIteration:
break
return next(self._dataiter)
else:
raise StopIteration
a = list(range(1,11))
print(len(a), a)
a_ = GradAccumDataloader(a, accumulate_size=4)
for i, val in enumerate(a_):
print(i, val)
```
the output is:
```
10 [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
0 1
1 2
2 3
3 4
4 5
5 6
6 7
```
### Environment
colossalai: pip install colossalai==0.1.4+torch1.10cu11.1 -f https://release.colossalai.org
ubuntu 20.04
--- END ISSUE ---
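The output above stops after 7 items because `__next__` drains the remaining batches *before* fetching the batch for the current step, so the last step of the final complete accumulation cycle hits `StopIteration` instead of returning data. A minimal sketch of one possible fix, reusing the names from the snippet in the issue (not necessarily the upstream patch):

```
def __next__(self) -> Union[Tensor, Tuple[Tensor]]:
    if self._cur_step < self.steps_per_epoch:
        self._cur_step += 1
        data = next(self._dataiter)   # fetch this step's batch first

        if self._cur_step == self.steps_per_epoch and self.consume_remain_data:
            # non-standard dataloaders (e.g. DALI) still need their leftovers consumed
            while True:
                try:
                    _ = next(self._dataiter)
                except StopIteration:
                    break
        return data
    else:
        raise StopIteration
```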
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/engine/gradient_accumulation/_gradient_accumulation.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- encoding: utf-8 -*-
3
4 from typing import Union
5 import torch.nn as nn
6 from torch import Tensor
7 from typing import Iterable, Any, Tuple
8 from colossalai.nn.optimizer import ColossalaiOptimizer
9 from torch.nn.parallel.distributed import DistributedDataParallel
10 from torch.optim import Optimizer
11 from torch.optim.lr_scheduler import _LRScheduler
12 from torch.utils.data import DataLoader
13 from colossalai.utils import conditional_context
14 from colossalai.engine import BaseGradientHandler
15
16
17 class GradAccumOptimizer(ColossalaiOptimizer):
18 """A wrapper for the optimizer to enable gradient accumulation by skipping the steps
19 before accumulation size is reached.
20
21 Args:
22 optim (:class:`torch.optim.Optimizer`): Your optimizer object for gradient accumulation.
23 accumulate_size (int): The number of steps to accumulate gradients.
24 model (:class:`torch.nn.Module`):
25 Your model object to check if it is DistributedDataParallel for special handling of no_sync() context.
26 """
27
28 def __init__(self, optim: Optimizer, accumulate_size: int, model: nn.Module = None):
29 super().__init__(optim)
30 self.accumulate_size = accumulate_size
31 self.accumulate_step = 0
32
33 # handle pytorch ddp auto all reduce
34 self.model = model
35 self.is_torch_ddp = isinstance(self.model, DistributedDataParallel)
36
37 def zero_grad(self, *args, **kwargs) -> None:
38 """
39 Set all gradients to zero.
40
41 Args:
42 *args: positional arguments for the optimizer wrapped
43 **kwargs: keyword arguments for the optimizer wrapped
44 """
45
46 if self.accumulate_step == 0:
47 self.optim.zero_grad(*args, **kwargs)
48
49 def step(self, *args, **kwargs) -> None:
50 """
51 Update the model parameters.
52
53 Args:
54 *args: positional arguments for the optimizer wrapped
55 **kwargs: keyword arguments for the optimizer wrapped
56 """
57
58 if self.accumulate_step < self.accumulate_size:
59 return None
60 else:
61 self.accumulate_step = 0
62 return self.optim.step(*args, **kwargs)
63
64 def clip_grad_norm(self, model: nn.Module, max_norm: float) -> None:
65 """
66 Clip gradients by norm.
67
68 Args:
69 model (:class:`torch.nn.Module`): a torch module instance
70 max_norm (float): the max norm for gradient clipping
71 """
72
73 if self.accumulate_step < self.accumulate_size:
74 pass
75 else:
76 self.optim.clip_grad_norm(model, max_norm)
77
78 def backward(self, loss: Tensor) -> None:
79 """Execute backward pass.
80
81 Args:
82 loss (:class:`torch.Tensor`): the loss value.
83 """
84
85 self.accumulate_step += 1
86
87 if self.is_torch_ddp:
88 no_sync = self.accumulate_step < self.accumulate_size
89 with conditional_context(self.model.no_sync(), enable=no_sync):
90 scaled_loss = loss / self.accumulate_size
91 self.optim.backward(scaled_loss)
92 else:
93 scaled_loss = loss / self.accumulate_size
94 self.optim.backward(scaled_loss)
95
96 def backward_by_grad(self, tensor: Tensor, grad: Tensor) -> None:
97 """Execute backward pass given the gradients of the output.
98
99 Args:
100 loss (:class:`torch.Tensor`): the loss value.
101 grad (:class:`torch.Tensor`): the output gradient.
102 """
103
104 self.accumulate_step += 1
105 no_sync = self.is_torch_ddp and self.accumulate_step < self.accumulate_size
106
107 if no_sync:
108 with self.model.no_sync():
109 self.optim.backward_by_grad(tensor, grad)
110 else:
111 self.optim.backward_by_grad(tensor, grad)
112
113
114 class GradAccumDataloader:
115 """A wrapper for dataloader to enable gradient accumulation by dropping the last incomplete steps.
116
117 Note:
118 The dataloader would drop the last incomplete steps for gradient accumulation.
119 For example, if a dataloader has 10 batches of data and accumulate size is 4. The model parameters will
120 be updated only twice at step 4 and step 8. The last two batches of data do not form a complete 4-step cycle.
121 Thus, they will be automatically skipped by this class. If the dataloader is not standard PyTorch dataloader,
122 (e.g. Dali dataloader), this class will automatically consume (load data for nothing) the remaining 2 batches.
123
124 Args:
125 dataloader (``Iterable``): Your dataloader object for gradient accumulation.
126 accumulate_size (int): The number of steps to accumulate gradients.
127 """
128
129 def __init__(self, dataloader: Iterable, accumulate_size: int) -> None:
130 self.dataloader = dataloader
131 self.consume_remain_data = not isinstance(dataloader, DataLoader)
132 self.steps_per_epoch = len(dataloader) - len(dataloader) % accumulate_size
133
134 def __getattr__(self, __name: str) -> Any:
135 return getattr(self.dataloader, __name)
136
137 def __len__(self) -> int:
138 return self.steps_per_epoch
139
140 def __iter__(self) -> Iterable:
141 self._cur_step = 0
142 self._dataiter = iter(self.dataloader)
143 return self
144
145 def __next__(self) -> Union[Tensor, Tuple[Tensor]]:
146 if self._cur_step < self.steps_per_epoch:
147 self._cur_step += 1
148
149 if self._cur_step == self.steps_per_epoch and self.consume_remain_data:
150 # this is to handle non standard pytorch dataloader
151 # such as dali dataloader
152 while True:
153 try:
154 _ = next(self._dataiter)
155 except StopIteration:
156 break
157 return next(self._dataiter)
158 else:
159 raise StopIteration
160
161
162 class GradAccumLrSchedulerByStep(_LRScheduler):
163 """A wrapper for the LR scheduler to enable gradient accumulation by skipping the steps
164 before accumulation size is reached.
165
166 Args:
167 lr_scheduler (:class:`torch.optim.lr_scheduler._LRScheduler`):
168 Your ``lr_scheduler`` object for gradient accumulation.
169 accumulate_size (int): The number of steps to accumulate gradients.
170 """
171
172 def __init__(self, lr_scheduler: _LRScheduler, accumulate_size: int) -> None:
173 self.lr_scheduler = lr_scheduler
174 self.accumulate_size = accumulate_size
175 self.accumulate_step = 0
176
177 @staticmethod
178 def compute_effective_steps_per_epoch(dataloader: Iterable, accumulate_size: int) -> int:
179 """
180 Computes the number of effective training iterations. An effective iteration is defined
181 as the the aggregation of <accumulate_size> iterations. For examples, if accumulate_size = 4,
182 then 4 iterations are considered as one effective iteration.
183
184 Args:
185 dataloader (``Iterable``): Your dataloader object for gradient accumulation.
186 accumulate_size (int): The number of steps to accumulate gradients.
187
188 """
189 return len(dataloader) // accumulate_size
190
191 def __getattr__(self, __name: str) -> Any:
192 return getattr(self.lr_scheduler, __name)
193
194 def step(self, *args, **kwargs) -> None:
195 """
196 Update the learning rate.
197
198 Args:
199 *args: positional arguments for the lr scheduler wrapped.
200 **kwargs: keyword arguments for the lr scheduler wrapped.
201 """
202 self.accumulate_step += 1
203 if self.accumulate_step < self.accumulate_size:
204 pass
205 else:
206 self.accumulate_step = 0
207 self.lr_scheduler.step(*args, **kwargs)
208
209 def get_lr(self) -> Tensor:
210 """
211 Compute the next learning rate.
212
213 Returns:
214 Tensor: the upcoming learning rate.
215 """
216
217 return self.lr_scheduler.get_lr()
218
219 def get_last_lr(self) -> Tensor:
220 """
221 Returns the current learning rate.
222
223 Returns:
224 Tensor: the current learning rate.
225 """
226
227 return self.lr_scheduler.get_last_lr()
228
229 def print_lr(self, *args, **kwargs) -> None:
230 """
231 Print he learning rate.
232
233 Args:
234 *args: positional arguments for the lr scheduler wrapped.
235 **kwargs: keyword arguments for the lr scheduler wrapped.
236 """
237 self.lr_scheduler.print_lr(*args, **kwargs)
238
239 def state_dict(self) -> dict:
240 """
241 Returns the states of the lr scheduler as dictionary.
242
243 Returns:
244 dict: the states of the lr scheduler.
245 """
246 return self.lr_scheduler.state_dict()
247
248 def load_state_dict(self, state_dict: dict) -> None:
249 """
250 Load the states of the lr scheduler from a dictionary object.
251
252 Returns:
253 dict: the states of the lr scheduler.
254 """
255 self.lr_scheduler.load_state_dict(state_dict)
256
257
258 class GradAccumGradientHandler:
259 r"""A wrapper for the gradient handler to enable gradient accumulation by skipping the steps
260 before accumulation size is reached.
261
262 Args:
263 grad_handler (:class:`colossalai.engine.BaseGradientHandler`):
264 Your ``gradient_handler`` object for gradient accumulation, would be called when achieving `accumulate_size`.
265 accumulate_size (int): The number of steps to accumulate gradients.
266
267 More details about ``gradient_handlers`` could be found in
268 `Gradient_handler <https://github.com/hpcaitech/ColossalAI/tree/main/colossalai/engine/gradient_handler>`_.
269
270 """
271
272 def __init__(self, grad_handler: BaseGradientHandler, accumulate_size: int) -> None:
273 assert isinstance(grad_handler, BaseGradientHandler), \
274 f'expected grad_handler to be type BaseGradientHandler, but got {type(grad_handler)}'
275 self.grad_handler = grad_handler
276 self.accumulate_size = accumulate_size
277 self.accumulate_step = 0
278
279 def handle_gradient(self) -> None:
280 """
281 Handle gradients reduction only in the last gradient accumulation step.
282 """
283
284 self.accumulate_step += 1
285 if self.accumulate_step < self.accumulate_size:
286 pass
287 else:
288 self.accumulate_step = 0
289 self.grad_handler.handle_gradient()
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/engine/gradient_accumulation/_gradient_accumulation.py b/colossalai/engine/gradient_accumulation/_gradient_accumulation.py
--- a/colossalai/engine/gradient_accumulation/_gradient_accumulation.py
+++ b/colossalai/engine/gradient_accumulation/_gradient_accumulation.py
@@ -145,6 +145,7 @@
def __next__(self) -> Union[Tensor, Tuple[Tensor]]:
if self._cur_step < self.steps_per_epoch:
self._cur_step += 1
+ data = next(self._dataiter)
if self._cur_step == self.steps_per_epoch and self.consume_remain_data:
# this is to handle non standard pytorch dataloader
@@ -154,7 +155,7 @@
_ = next(self._dataiter)
except StopIteration:
break
- return next(self._dataiter)
+ return data
else:
raise StopIteration
| {"golden_diff": "diff --git a/colossalai/engine/gradient_accumulation/_gradient_accumulation.py b/colossalai/engine/gradient_accumulation/_gradient_accumulation.py\n--- a/colossalai/engine/gradient_accumulation/_gradient_accumulation.py\n+++ b/colossalai/engine/gradient_accumulation/_gradient_accumulation.py\n@@ -145,6 +145,7 @@\n def __next__(self) -> Union[Tensor, Tuple[Tensor]]:\n if self._cur_step < self.steps_per_epoch:\n self._cur_step += 1\n+ data = next(self._dataiter)\n \n if self._cur_step == self.steps_per_epoch and self.consume_remain_data:\n # this is to handle non standard pytorch dataloader\n@@ -154,7 +155,7 @@\n _ = next(self._dataiter)\n except StopIteration:\n break\n- return next(self._dataiter)\n+ return data\n else:\n raise StopIteration\n", "issue": "[BUG]: GradAccumDataloader will ignore the last iter.\n### \ud83d\udc1b Describe the bug\r\n\r\nI used the list as dataloader for testing.\r\n\r\n```\r\nclass GradAccumDataloader:\r\n \"\"\"A wrapper for dataloader to enable gradient accumulation by dropping the last incomplete steps.\r\n\r\n Note:\r\n The dataloader would drop the last incomplete steps for gradient accumulation.\r\n For example, if a dataloader has 10 batches of data and accumulate size is 4. The model parameters will\r\n be updated only twice at step 4 and step 8. The last two batches of data do not form a complete 4-step cycle.\r\n Thus, they will be automatically skipped by this class. If the dataloader is not standard PyTorch dataloader,\r\n (e.g. Dali dataloader), this class will automatically consume (load data for nothing) the remaining 2 batches.\r\n\r\n Args:\r\n dataloader (``Iterable``): Your dataloader object for gradient accumulation.\r\n accumulate_size (int): The number of steps to accumulate gradients.\r\n \"\"\"\r\n\r\n def __init__(self, dataloader: Iterable, accumulate_size: int) -> None:\r\n self.dataloader = dataloader\r\n self.consume_remain_data = True\r\n self.steps_per_epoch = len(dataloader) - len(dataloader) % accumulate_size\r\n\r\n def __getattr__(self, __name: str) -> Any:\r\n return getattr(self.dataloader, __name)\r\n\r\n def __len__(self) -> int:\r\n return self.steps_per_epoch\r\n\r\n def __iter__(self) -> Iterable:\r\n self._cur_step = 0\r\n self._dataiter = iter(self.dataloader)\r\n return self\r\n\r\n def __next__(self) -> Union[Tensor, Tuple[Tensor]]:\r\n if self._cur_step < self.steps_per_epoch:\r\n self._cur_step += 1\r\n\r\n if self._cur_step == self.steps_per_epoch and self.consume_remain_data:\r\n # this is to handle non standard pytorch dataloader\r\n # such as dali dataloader\r\n while True:\r\n try:\r\n _ = next(self._dataiter)\r\n except StopIteration:\r\n break\r\n return next(self._dataiter)\r\n else:\r\n raise StopIteration\r\n\r\na = list(range(1,11))\r\nprint(len(a), a)\r\na_ = GradAccumDataloaderTorch(a, accumulate_size=4)\r\nfor i, val in enumerate(a_):\r\n print(i, val)\r\n```\r\n\r\nthe output is:\r\n```\r\n10 [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\n0 1\r\n1 2\r\n2 3\r\n3 4\r\n4 5\r\n5 6\r\n6 7\r\n```\r\n\r\n\r\n\r\n### Environment\r\n\r\ncollosal : pip install colossalai==0.1.4+torch1.10cu11.1 -f https://release.colossalai.org\r\nubuntu 20.04\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom typing import Union\nimport torch.nn as nn\nfrom torch import Tensor\nfrom typing import Iterable, Any, Tuple\nfrom colossalai.nn.optimizer import ColossalaiOptimizer\nfrom torch.nn.parallel.distributed import DistributedDataParallel\nfrom torch.optim import Optimizer\nfrom 
torch.optim.lr_scheduler import _LRScheduler\nfrom torch.utils.data import DataLoader\nfrom colossalai.utils import conditional_context\nfrom colossalai.engine import BaseGradientHandler\n\n\nclass GradAccumOptimizer(ColossalaiOptimizer):\n \"\"\"A wrapper for the optimizer to enable gradient accumulation by skipping the steps \n before accumulation size is reached.\n\n Args:\n optim (:class:`torch.optim.Optimizer`): Your optimizer object for gradient accumulation.\n accumulate_size (int): The number of steps to accumulate gradients.\n model (:class:`torch.nn.Module`):\n Your model object to check if it is DistributedDataParallel for special handling of no_sync() context.\n \"\"\"\n\n def __init__(self, optim: Optimizer, accumulate_size: int, model: nn.Module = None):\n super().__init__(optim)\n self.accumulate_size = accumulate_size\n self.accumulate_step = 0\n\n # handle pytorch ddp auto all reduce\n self.model = model\n self.is_torch_ddp = isinstance(self.model, DistributedDataParallel)\n\n def zero_grad(self, *args, **kwargs) -> None:\n \"\"\"\n Set all gradients to zero.\n\n Args:\n *args: positional arguments for the optimizer wrapped\n **kwargs: keyword arguments for the optimizer wrapped\n \"\"\"\n\n if self.accumulate_step == 0:\n self.optim.zero_grad(*args, **kwargs)\n\n def step(self, *args, **kwargs) -> None:\n \"\"\"\n Update the model parameters.\n\n Args:\n *args: positional arguments for the optimizer wrapped\n **kwargs: keyword arguments for the optimizer wrapped\n \"\"\"\n\n if self.accumulate_step < self.accumulate_size:\n return None\n else:\n self.accumulate_step = 0\n return self.optim.step(*args, **kwargs)\n\n def clip_grad_norm(self, model: nn.Module, max_norm: float) -> None:\n \"\"\"\n Clip gradients by norm.\n\n Args:\n model (:class:`torch.nn.Module`): a torch module instance\n max_norm (float): the max norm for gradient clipping\n \"\"\"\n\n if self.accumulate_step < self.accumulate_size:\n pass\n else:\n self.optim.clip_grad_norm(model, max_norm)\n\n def backward(self, loss: Tensor) -> None:\n \"\"\"Execute backward pass.\n\n Args:\n loss (:class:`torch.Tensor`): the loss value.\n \"\"\"\n\n self.accumulate_step += 1\n\n if self.is_torch_ddp:\n no_sync = self.accumulate_step < self.accumulate_size\n with conditional_context(self.model.no_sync(), enable=no_sync):\n scaled_loss = loss / self.accumulate_size\n self.optim.backward(scaled_loss)\n else:\n scaled_loss = loss / self.accumulate_size\n self.optim.backward(scaled_loss)\n\n def backward_by_grad(self, tensor: Tensor, grad: Tensor) -> None:\n \"\"\"Execute backward pass given the gradients of the output.\n\n Args:\n loss (:class:`torch.Tensor`): the loss value.\n grad (:class:`torch.Tensor`): the output gradient.\n \"\"\"\n\n self.accumulate_step += 1\n no_sync = self.is_torch_ddp and self.accumulate_step < self.accumulate_size\n\n if no_sync:\n with self.model.no_sync():\n self.optim.backward_by_grad(tensor, grad)\n else:\n self.optim.backward_by_grad(tensor, grad)\n\n\nclass GradAccumDataloader:\n \"\"\"A wrapper for dataloader to enable gradient accumulation by dropping the last incomplete steps.\n\n Note:\n The dataloader would drop the last incomplete steps for gradient accumulation.\n For example, if a dataloader has 10 batches of data and accumulate size is 4. The model parameters will\n be updated only twice at step 4 and step 8. The last two batches of data do not form a complete 4-step cycle.\n Thus, they will be automatically skipped by this class. 
If the dataloader is not standard PyTorch dataloader,\n (e.g. Dali dataloader), this class will automatically consume (load data for nothing) the remaining 2 batches.\n\n Args:\n dataloader (``Iterable``): Your dataloader object for gradient accumulation.\n accumulate_size (int): The number of steps to accumulate gradients.\n \"\"\"\n\n def __init__(self, dataloader: Iterable, accumulate_size: int) -> None:\n self.dataloader = dataloader\n self.consume_remain_data = not isinstance(dataloader, DataLoader)\n self.steps_per_epoch = len(dataloader) - len(dataloader) % accumulate_size\n\n def __getattr__(self, __name: str) -> Any:\n return getattr(self.dataloader, __name)\n\n def __len__(self) -> int:\n return self.steps_per_epoch\n\n def __iter__(self) -> Iterable:\n self._cur_step = 0\n self._dataiter = iter(self.dataloader)\n return self\n\n def __next__(self) -> Union[Tensor, Tuple[Tensor]]:\n if self._cur_step < self.steps_per_epoch:\n self._cur_step += 1\n\n if self._cur_step == self.steps_per_epoch and self.consume_remain_data:\n # this is to handle non standard pytorch dataloader\n # such as dali dataloader\n while True:\n try:\n _ = next(self._dataiter)\n except StopIteration:\n break\n return next(self._dataiter)\n else:\n raise StopIteration\n\n\nclass GradAccumLrSchedulerByStep(_LRScheduler):\n \"\"\"A wrapper for the LR scheduler to enable gradient accumulation by skipping the steps \n before accumulation size is reached.\n\n Args:\n lr_scheduler (:class:`torch.optim.lr_scheduler._LRScheduler`):\n Your ``lr_scheduler`` object for gradient accumulation.\n accumulate_size (int): The number of steps to accumulate gradients.\n \"\"\"\n\n def __init__(self, lr_scheduler: _LRScheduler, accumulate_size: int) -> None:\n self.lr_scheduler = lr_scheduler\n self.accumulate_size = accumulate_size\n self.accumulate_step = 0\n\n @staticmethod\n def compute_effective_steps_per_epoch(dataloader: Iterable, accumulate_size: int) -> int:\n \"\"\"\n Computes the number of effective training iterations. An effective iteration is defined\n as the the aggregation of <accumulate_size> iterations. 
For examples, if accumulate_size = 4,\n then 4 iterations are considered as one effective iteration.\n\n Args:\n dataloader (``Iterable``): Your dataloader object for gradient accumulation.\n accumulate_size (int): The number of steps to accumulate gradients.\n\n \"\"\"\n return len(dataloader) // accumulate_size\n\n def __getattr__(self, __name: str) -> Any:\n return getattr(self.lr_scheduler, __name)\n\n def step(self, *args, **kwargs) -> None:\n \"\"\"\n Update the learning rate.\n\n Args:\n *args: positional arguments for the lr scheduler wrapped.\n **kwargs: keyword arguments for the lr scheduler wrapped.\n \"\"\"\n self.accumulate_step += 1\n if self.accumulate_step < self.accumulate_size:\n pass\n else:\n self.accumulate_step = 0\n self.lr_scheduler.step(*args, **kwargs)\n\n def get_lr(self) -> Tensor:\n \"\"\"\n Compute the next learning rate.\n\n Returns:\n Tensor: the upcoming learning rate.\n \"\"\"\n\n return self.lr_scheduler.get_lr()\n\n def get_last_lr(self) -> Tensor:\n \"\"\"\n Returns the current learning rate.\n\n Returns:\n Tensor: the current learning rate.\n \"\"\"\n\n return self.lr_scheduler.get_last_lr()\n\n def print_lr(self, *args, **kwargs) -> None:\n \"\"\"\n Print he learning rate.\n\n Args:\n *args: positional arguments for the lr scheduler wrapped.\n **kwargs: keyword arguments for the lr scheduler wrapped.\n \"\"\"\n self.lr_scheduler.print_lr(*args, **kwargs)\n\n def state_dict(self) -> dict:\n \"\"\"\n Returns the states of the lr scheduler as dictionary.\n\n Returns:\n dict: the states of the lr scheduler.\n \"\"\"\n return self.lr_scheduler.state_dict()\n\n def load_state_dict(self, state_dict: dict) -> None:\n \"\"\"\n Load the states of the lr scheduler from a dictionary object.\n\n Returns:\n dict: the states of the lr scheduler.\n \"\"\"\n self.lr_scheduler.load_state_dict(state_dict)\n\n\nclass GradAccumGradientHandler:\n r\"\"\"A wrapper for the gradient handler to enable gradient accumulation by skipping the steps\n before accumulation size is reached.\n\n Args:\n grad_handler (:class:`colossalai.engine.BaseGradientHandler`):\n Your ``gradient_handler`` object for gradient accumulation, would be called when achieving `accumulate_size`.\n accumulate_size (int): The number of steps to accumulate gradients.\n\n More details about ``gradient_handlers`` could be found in\n `Gradient_handler <https://github.com/hpcaitech/ColossalAI/tree/main/colossalai/engine/gradient_handler>`_.\n\n \"\"\"\n\n def __init__(self, grad_handler: BaseGradientHandler, accumulate_size: int) -> None:\n assert isinstance(grad_handler, BaseGradientHandler), \\\n f'expected grad_handler to be type BaseGradientHandler, but got {type(grad_handler)}'\n self.grad_handler = grad_handler\n self.accumulate_size = accumulate_size\n self.accumulate_step = 0\n\n def handle_gradient(self) -> None:\n \"\"\"\n Handle gradients reduction only in the last gradient accumulation step.\n \"\"\"\n\n self.accumulate_step += 1\n if self.accumulate_step < self.accumulate_size:\n pass\n else:\n self.accumulate_step = 0\n self.grad_handler.handle_gradient()\n", "path": "colossalai/engine/gradient_accumulation/_gradient_accumulation.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\nfrom typing import Union\nimport torch.nn as nn\nfrom torch import Tensor\nfrom typing import Iterable, Any, Tuple\nfrom colossalai.nn.optimizer import ColossalaiOptimizer\nfrom torch.nn.parallel.distributed import DistributedDataParallel\nfrom torch.optim import Optimizer\nfrom 
torch.optim.lr_scheduler import _LRScheduler\nfrom torch.utils.data import DataLoader\nfrom colossalai.utils import conditional_context\nfrom colossalai.engine import BaseGradientHandler\n\n\nclass GradAccumOptimizer(ColossalaiOptimizer):\n \"\"\"A wrapper for the optimizer to enable gradient accumulation by skipping the steps \n before accumulation size is reached.\n\n Args:\n optim (:class:`torch.optim.Optimizer`): Your optimizer object for gradient accumulation.\n accumulate_size (int): The number of steps to accumulate gradients.\n model (:class:`torch.nn.Module`):\n Your model object to check if it is DistributedDataParallel for special handling of no_sync() context.\n \"\"\"\n\n def __init__(self, optim: Optimizer, accumulate_size: int, model: nn.Module = None):\n super().__init__(optim)\n self.accumulate_size = accumulate_size\n self.accumulate_step = 0\n\n # handle pytorch ddp auto all reduce\n self.model = model\n self.is_torch_ddp = isinstance(self.model, DistributedDataParallel)\n\n def zero_grad(self, *args, **kwargs) -> None:\n \"\"\"\n Set all gradients to zero.\n\n Args:\n *args: positional arguments for the optimizer wrapped\n **kwargs: keyword arguments for the optimizer wrapped\n \"\"\"\n\n if self.accumulate_step == 0:\n self.optim.zero_grad(*args, **kwargs)\n\n def step(self, *args, **kwargs) -> None:\n \"\"\"\n Update the model parameters.\n\n Args:\n *args: positional arguments for the optimizer wrapped\n **kwargs: keyword arguments for the optimizer wrapped\n \"\"\"\n\n if self.accumulate_step < self.accumulate_size:\n return None\n else:\n self.accumulate_step = 0\n return self.optim.step(*args, **kwargs)\n\n def clip_grad_norm(self, model: nn.Module, max_norm: float) -> None:\n \"\"\"\n Clip gradients by norm.\n\n Args:\n model (:class:`torch.nn.Module`): a torch module instance\n max_norm (float): the max norm for gradient clipping\n \"\"\"\n\n if self.accumulate_step < self.accumulate_size:\n pass\n else:\n self.optim.clip_grad_norm(model, max_norm)\n\n def backward(self, loss: Tensor) -> None:\n \"\"\"Execute backward pass.\n\n Args:\n loss (:class:`torch.Tensor`): the loss value.\n \"\"\"\n\n self.accumulate_step += 1\n\n if self.is_torch_ddp:\n no_sync = self.accumulate_step < self.accumulate_size\n with conditional_context(self.model.no_sync(), enable=no_sync):\n scaled_loss = loss / self.accumulate_size\n self.optim.backward(scaled_loss)\n else:\n scaled_loss = loss / self.accumulate_size\n self.optim.backward(scaled_loss)\n\n def backward_by_grad(self, tensor: Tensor, grad: Tensor) -> None:\n \"\"\"Execute backward pass given the gradients of the output.\n\n Args:\n loss (:class:`torch.Tensor`): the loss value.\n grad (:class:`torch.Tensor`): the output gradient.\n \"\"\"\n\n self.accumulate_step += 1\n no_sync = self.is_torch_ddp and self.accumulate_step < self.accumulate_size\n\n if no_sync:\n with self.model.no_sync():\n self.optim.backward_by_grad(tensor, grad)\n else:\n self.optim.backward_by_grad(tensor, grad)\n\n\nclass GradAccumDataloader:\n \"\"\"A wrapper for dataloader to enable gradient accumulation by dropping the last incomplete steps.\n\n Note:\n The dataloader would drop the last incomplete steps for gradient accumulation.\n For example, if a dataloader has 10 batches of data and accumulate size is 4. The model parameters will\n be updated only twice at step 4 and step 8. The last two batches of data do not form a complete 4-step cycle.\n Thus, they will be automatically skipped by this class. 
If the dataloader is not standard PyTorch dataloader,\n (e.g. Dali dataloader), this class will automatically consume (load data for nothing) the remaining 2 batches.\n\n Args:\n dataloader (``Iterable``): Your dataloader object for gradient accumulation.\n accumulate_size (int): The number of steps to accumulate gradients.\n \"\"\"\n\n def __init__(self, dataloader: Iterable, accumulate_size: int) -> None:\n self.dataloader = dataloader\n self.consume_remain_data = not isinstance(dataloader, DataLoader)\n self.steps_per_epoch = len(dataloader) - len(dataloader) % accumulate_size\n\n def __getattr__(self, __name: str) -> Any:\n return getattr(self.dataloader, __name)\n\n def __len__(self) -> int:\n return self.steps_per_epoch\n\n def __iter__(self) -> Iterable:\n self._cur_step = 0\n self._dataiter = iter(self.dataloader)\n return self\n\n def __next__(self) -> Union[Tensor, Tuple[Tensor]]:\n if self._cur_step < self.steps_per_epoch:\n self._cur_step += 1\n data = next(self._dataiter)\n\n if self._cur_step == self.steps_per_epoch and self.consume_remain_data:\n # this is to handle non standard pytorch dataloader\n # such as dali dataloader\n while True:\n try:\n _ = next(self._dataiter)\n except StopIteration:\n break\n return data\n else:\n raise StopIteration\n\n\nclass GradAccumLrSchedulerByStep(_LRScheduler):\n \"\"\"A wrapper for the LR scheduler to enable gradient accumulation by skipping the steps \n before accumulation size is reached.\n\n Args:\n lr_scheduler (:class:`torch.optim.lr_scheduler._LRScheduler`):\n Your ``lr_scheduler`` object for gradient accumulation.\n accumulate_size (int): The number of steps to accumulate gradients.\n \"\"\"\n\n def __init__(self, lr_scheduler: _LRScheduler, accumulate_size: int) -> None:\n self.lr_scheduler = lr_scheduler\n self.accumulate_size = accumulate_size\n self.accumulate_step = 0\n\n @staticmethod\n def compute_effective_steps_per_epoch(dataloader: Iterable, accumulate_size: int) -> int:\n \"\"\"\n Computes the number of effective training iterations. An effective iteration is defined\n as the the aggregation of <accumulate_size> iterations. 
For examples, if accumulate_size = 4,\n then 4 iterations are considered as one effective iteration.\n\n Args:\n dataloader (``Iterable``): Your dataloader object for gradient accumulation.\n accumulate_size (int): The number of steps to accumulate gradients.\n\n \"\"\"\n return len(dataloader) // accumulate_size\n\n def __getattr__(self, __name: str) -> Any:\n return getattr(self.lr_scheduler, __name)\n\n def step(self, *args, **kwargs) -> None:\n \"\"\"\n Update the learning rate.\n\n Args:\n *args: positional arguments for the lr scheduler wrapped.\n **kwargs: keyword arguments for the lr scheduler wrapped.\n \"\"\"\n self.accumulate_step += 1\n if self.accumulate_step < self.accumulate_size:\n pass\n else:\n self.accumulate_step = 0\n self.lr_scheduler.step(*args, **kwargs)\n\n def get_lr(self) -> Tensor:\n \"\"\"\n Compute the next learning rate.\n\n Returns:\n Tensor: the upcoming learning rate.\n \"\"\"\n\n return self.lr_scheduler.get_lr()\n\n def get_last_lr(self) -> Tensor:\n \"\"\"\n Returns the current learning rate.\n\n Returns:\n Tensor: the current learning rate.\n \"\"\"\n\n return self.lr_scheduler.get_last_lr()\n\n def print_lr(self, *args, **kwargs) -> None:\n \"\"\"\n Print he learning rate.\n\n Args:\n *args: positional arguments for the lr scheduler wrapped.\n **kwargs: keyword arguments for the lr scheduler wrapped.\n \"\"\"\n self.lr_scheduler.print_lr(*args, **kwargs)\n\n def state_dict(self) -> dict:\n \"\"\"\n Returns the states of the lr scheduler as dictionary.\n\n Returns:\n dict: the states of the lr scheduler.\n \"\"\"\n return self.lr_scheduler.state_dict()\n\n def load_state_dict(self, state_dict: dict) -> None:\n \"\"\"\n Load the states of the lr scheduler from a dictionary object.\n\n Returns:\n dict: the states of the lr scheduler.\n \"\"\"\n self.lr_scheduler.load_state_dict(state_dict)\n\n\nclass GradAccumGradientHandler:\n r\"\"\"A wrapper for the gradient handler to enable gradient accumulation by skipping the steps\n before accumulation size is reached.\n\n Args:\n grad_handler (:class:`colossalai.engine.BaseGradientHandler`):\n Your ``gradient_handler`` object for gradient accumulation, would be called when achieving `accumulate_size`.\n accumulate_size (int): The number of steps to accumulate gradients.\n\n More details about ``gradient_handlers`` could be found in\n `Gradient_handler <https://github.com/hpcaitech/ColossalAI/tree/main/colossalai/engine/gradient_handler>`_.\n\n \"\"\"\n\n def __init__(self, grad_handler: BaseGradientHandler, accumulate_size: int) -> None:\n assert isinstance(grad_handler, BaseGradientHandler), \\\n f'expected grad_handler to be type BaseGradientHandler, but got {type(grad_handler)}'\n self.grad_handler = grad_handler\n self.accumulate_size = accumulate_size\n self.accumulate_step = 0\n\n def handle_gradient(self) -> None:\n \"\"\"\n Handle gradients reduction only in the last gradient accumulation step.\n \"\"\"\n\n self.accumulate_step += 1\n if self.accumulate_step < self.accumulate_size:\n pass\n else:\n self.accumulate_step = 0\n self.grad_handler.handle_gradient()\n", "path": "colossalai/engine/gradient_accumulation/_gradient_accumulation.py"}]} | 3,875 | 214 |
gh_patches_debug_9138 | rasdani/github-patches | git_diff | keras-team__autokeras-277 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot install autokeras because of a package dependency conflict
### Bug Description
The following package dependencies are configured in setup.py:
https://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6
```
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
'tensorflow>=1.10.0', 'tqdm==4.25.0'],
```
When executing `pip install autokeras`, the following error appears:
```
keras 2.2.2 has requirement keras-applications==1.0.4, but you'll have keras-applications 1.0.6 which is incompatible.
keras 2.2.2 has requirement keras-preprocessing==1.0.2, but you'll have keras-preprocessing 1.0.5 which is incompatible.
```
This is because tensorflow==1.11.0 is installed first, and
keras-applications >= 1.0.5 and keras-preprocessing >= 1.0.3 are installed along with tensorflow==1.11.0.
On the other hand, keras==2.2.2 pins keras-applications==1.0.4 and keras-preprocessing==1.0.2.
The tensorflow version should therefore be pinned as `tensorflow==1.10.0` at [setup.py L6](https://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6).
```
# before
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
'tensorflow>=1.10.0', 'tqdm==4.25.0'],
# after
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
'tensorflow==1.10.0', 'tqdm==4.25.0'],
```
### Reproducing Steps
Step1: curl https://gist.githubusercontent.com/chie8842/b3b9f3ea2d886bbb5aa5c903b9e42ee3/raw/e94cc375ca1265c66d4517a25a748f1e13a3de9d/Dockerfile -o Dockerfile
Step2: docker build -t autokeras -f Dockerfile .
Step3: docker run -it --rm autokeras /bin/bash
Step4: sudo pip install autokeras
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from distutils.core import setup
2
3 setup(
4 name='autokeras',
5 packages=['autokeras'], # this must be the same as the name above
6 install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
7 'tensorflow>=1.10.0', 'tqdm==4.25.0'],
8 version='0.2.18',
9 description='AutoML for deep learning',
10 author='Haifeng Jin',
11 author_email='[email protected]',
12 url='http://autokeras.com',
13 download_url='https://github.com/jhfjhfj1/autokeras/archive/0.2.18.tar.gz',
14 keywords=['automl'], # arbitrary keywords
15 classifiers=[]
16 )
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@
name='autokeras',
packages=['autokeras'], # this must be the same as the name above
install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',
- 'tensorflow>=1.10.0', 'tqdm==4.25.0'],
+ 'tensorflow==1.10.0', 'tqdm==4.25.0'],
version='0.2.18',
description='AutoML for deep learning',
author='Haifeng Jin',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -4,7 +4,7 @@\n name='autokeras',\n packages=['autokeras'], # this must be the same as the name above\n install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\n- 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\n+ 'tensorflow==1.10.0', 'tqdm==4.25.0'],\n version='0.2.18',\n description='AutoML for deep learning',\n author='Haifeng Jin',\n", "issue": "cannot install autokeras because of package dependency confliction\n### Bug Description\r\nfollowing package dependency is configured at setup.py\r\nhttps://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6\r\n\r\n```\r\ninstall_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\r\n 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\r\n```\r\n\r\nWhen execute `pip install autokeras`, following error is appeared.\r\n\r\n```\r\nkeras 2.2.2 has requirement keras-applications==1.0.4, but you'll have keras-applications 1.0.6 which is incompatible.\r\nkeras 2.2.2 has requirement keras-preprocessing==1.0.2, but you'll have keras-preprocessing 1.0.5 which is incompatible.\r\n```\r\n\r\nIt is because that tensorflow==1.11.0 is installed first and\r\nkeras-applications >= 1.0.5 and keras-preprocessing > = 1.0.3 can installed with tensorflow==1.11.0.\r\nOn the other hand, keras==2.2.2's dependency versions are keras-applications==1.0.4 and keras-preprocessing==1.0.2.\r\n\r\n tensorflow version should be defined as `tensorflow==1.10.0`at [setup.py L6](https://github.com/jhfjhfj1/autokeras/blob/master/setup.py#L6).\r\n\r\n```\r\n# before\r\ninstall_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\r\n 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\r\n\r\n# after\r\ninstall_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\r\n 'tensorflow==1.10.0', 'tqdm==4.25.0'],\r\n```\r\n\r\n### Reproducing Steps\r\n\u00a0\r\nStep1: curl https://gist.githubusercontent.com/chie8842/b3b9f3ea2d886bbb5aa5c903b9e42ee3/raw/e94cc375ca1265c66d4517a25a748f1e13a3de9d/Dockerfile -o Dockerfile\r\nStep2: docker build -t autokeras -f Dockerfile .\r\nStep3: docker run -it --rm autokeras /bin/bash\r\nStep4: sudo pip install autokeras\n", "before_files": [{"content": "from distutils.core import setup\n\nsetup(\n name='autokeras',\n packages=['autokeras'], # this must be the same as the name above\n install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\n 'tensorflow>=1.10.0', 'tqdm==4.25.0'],\n version='0.2.18',\n description='AutoML for deep learning',\n author='Haifeng Jin',\n author_email='[email protected]',\n url='http://autokeras.com',\n download_url='https://github.com/jhfjhfj1/autokeras/archive/0.2.18.tar.gz',\n keywords=['automl'], # arbitrary keywords\n classifiers=[]\n)\n", "path": "setup.py"}], "after_files": [{"content": "from distutils.core import setup\n\nsetup(\n name='autokeras',\n packages=['autokeras'], # this must be the same as the name above\n install_requires=['torch==0.4.1', 'torchvision==0.2.1', 'numpy>=1.14.5', 'keras==2.2.2', 'scikit-learn==0.19.1',\n 'tensorflow==1.10.0', 'tqdm==4.25.0'],\n version='0.2.18',\n description='AutoML for deep learning',\n author='Haifeng Jin',\n author_email='[email protected]',\n url='http://autokeras.com',\n download_url='https://github.com/jhfjhfj1/autokeras/archive/0.2.18.tar.gz',\n 
keywords=['automl'], # arbitrary keywords\n classifiers=[]\n)\n", "path": "setup.py"}]} | 1,144 | 186 |
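The conflict described in the autokeras record above can also be checked without reading pip's output. The sketch below is a hypothetical environment check (it is not part of the autokeras repository) that uses `pkg_resources` to resolve keras 2.2.2 against the installed packages; with keras-applications 1.0.6 or keras-preprocessing 1.0.5 present it raises a `VersionConflict`.

```python
# Hypothetical environment check, not autokeras code.
import pkg_resources

try:
    pkg_resources.require("keras==2.2.2")  # also resolves its pinned deps
    print("keras 2.2.2 dependency pins are satisfied")
except pkg_resources.VersionConflict as err:
    # e.g. keras-applications 1.0.6 is installed but keras 2.2.2 pins 1.0.4
    print("conflict:", err)
```

Pinning `tensorflow==1.10.0`, as the patch does, removes the requirement for the newer keras-applications/keras-preprocessing releases, so keras 2.2.2's exact pins can be satisfied.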
gh_patches_debug_17732 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1381 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Import from Goodreads doesn't work correctly
**Describe the bug**
Importing from a Goodreads CSV imports only the first line of the CSV and stops with a 'success' status. If the user tries to re-import the same CSV, the importer picks up that same first line yet again.
Broken import examples https://bookwyrm.social/import/775 https://bookwyrm.social/import/776
**Expected behavior**
The importer correctly imports all lines of the CSV, or returns an error message to the user.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/import_data.py`
Content:
```
1 """ import books from another app """
2 from io import TextIOWrapper
3
4 from django.contrib.auth.decorators import login_required
5 from django.core.exceptions import PermissionDenied
6 from django.http import HttpResponseBadRequest
7 from django.shortcuts import get_object_or_404, redirect
8 from django.template.response import TemplateResponse
9 from django.utils.decorators import method_decorator
10 from django.utils.translation import gettext_lazy as _
11 from django.views import View
12
13 from bookwyrm import forms, models
14 from bookwyrm.importers import (
15 Importer,
16 LibrarythingImporter,
17 GoodreadsImporter,
18 StorygraphImporter,
19 )
20 from bookwyrm.tasks import app
21
22 # pylint: disable= no-self-use
23 @method_decorator(login_required, name="dispatch")
24 class Import(View):
25 """import view"""
26
27 def get(self, request):
28 """load import page"""
29 return TemplateResponse(
30 request,
31 "import.html",
32 {
33 "import_form": forms.ImportForm(),
34 "jobs": models.ImportJob.objects.filter(user=request.user).order_by(
35 "-created_date"
36 ),
37 },
38 )
39
40 def post(self, request):
41 """ingest a goodreads csv"""
42 form = forms.ImportForm(request.POST, request.FILES)
43 if form.is_valid():
44 include_reviews = request.POST.get("include_reviews") == "on"
45 privacy = request.POST.get("privacy")
46 source = request.POST.get("source")
47
48 importer = None
49 if source == "LibraryThing":
50 importer = LibrarythingImporter()
51 elif source == "Storygraph":
52 importer = StorygraphImporter()
53 else:
54 # Default : GoodReads
55 importer = GoodreadsImporter()
56
57 try:
58 job = importer.create_job(
59 request.user,
60 TextIOWrapper(
61 request.FILES["csv_file"], encoding=importer.encoding
62 ),
63 include_reviews,
64 privacy,
65 )
66 except (UnicodeDecodeError, ValueError, KeyError):
67 return HttpResponseBadRequest(_("Not a valid csv file"))
68
69 importer.start_import(job)
70
71 return redirect("/import/%d" % job.id)
72 return HttpResponseBadRequest()
73
74
75 @method_decorator(login_required, name="dispatch")
76 class ImportStatus(View):
77 """status of an existing import"""
78
79 def get(self, request, job_id):
80 """status of an import job"""
81 job = get_object_or_404(models.ImportJob, id=job_id)
82 if job.user != request.user:
83 raise PermissionDenied
84
85 try:
86 task = app.AsyncResult(job.task_id)
87 # triggers attribute error if the task won't load
88 task.status # pylint: disable=pointless-statement
89 except (ValueError, AttributeError):
90 task = None
91
92 items = job.items.order_by("index").all()
93 failed_items = [i for i in items if i.fail_reason]
94 items = [i for i in items if not i.fail_reason]
95 return TemplateResponse(
96 request,
97 "import_status.html",
98 {"job": job, "items": items, "failed_items": failed_items, "task": task},
99 )
100
101 def post(self, request, job_id):
102 """retry lines from an import"""
103 job = get_object_or_404(models.ImportJob, id=job_id)
104 items = []
105 for item in request.POST.getlist("import_item"):
106 items.append(get_object_or_404(models.ImportItem, id=item))
107
108 importer = Importer()
109 job = importer.create_retry_job(
110 request.user,
111 job,
112 items,
113 )
114 importer.start_import(job)
115 return redirect("/import/%d" % job.id)
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py
--- a/bookwyrm/views/import_data.py
+++ b/bookwyrm/views/import_data.py
@@ -28,7 +28,7 @@
"""load import page"""
return TemplateResponse(
request,
- "import.html",
+ "import/import.html",
{
"import_form": forms.ImportForm(),
"jobs": models.ImportJob.objects.filter(user=request.user).order_by(
@@ -94,7 +94,7 @@
items = [i for i in items if not i.fail_reason]
return TemplateResponse(
request,
- "import_status.html",
+ "import/import_status.html",
{"job": job, "items": items, "failed_items": failed_items, "task": task},
)
| {"golden_diff": "diff --git a/bookwyrm/views/import_data.py b/bookwyrm/views/import_data.py\n--- a/bookwyrm/views/import_data.py\n+++ b/bookwyrm/views/import_data.py\n@@ -28,7 +28,7 @@\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n- \"import.html\",\n+ \"import/import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n@@ -94,7 +94,7 @@\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n- \"import_status.html\",\n+ \"import/import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n", "issue": "Import from Goodreads doesn't work correctly\n**Describe the bug**\r\n\r\nImport from goodreads csv imports only first line of csv and stops with 'success' status. If user tries to reimport same csv again importer takes the same first imported line yet again. \r\n\r\nBroken import examples https://bookwyrm.social/import/775 https://bookwyrm.social/import/776\r\n\r\n**Expected behavior**\r\nImporter correctly imports all lines of csv or returns error message to user\n", "before_files": [{"content": "\"\"\" import books from another app \"\"\"\nfrom io import TextIOWrapper\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.importers import (\n Importer,\n LibrarythingImporter,\n GoodreadsImporter,\n StorygraphImporter,\n)\nfrom bookwyrm.tasks import app\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Import(View):\n \"\"\"import view\"\"\"\n\n def get(self, request):\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n \"import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n ),\n },\n )\n\n def post(self, request):\n \"\"\"ingest a goodreads csv\"\"\"\n form = forms.ImportForm(request.POST, request.FILES)\n if form.is_valid():\n include_reviews = request.POST.get(\"include_reviews\") == \"on\"\n privacy = request.POST.get(\"privacy\")\n source = request.POST.get(\"source\")\n\n importer = None\n if source == \"LibraryThing\":\n importer = LibrarythingImporter()\n elif source == \"Storygraph\":\n importer = StorygraphImporter()\n else:\n # Default : GoodReads\n importer = GoodreadsImporter()\n\n try:\n job = importer.create_job(\n request.user,\n TextIOWrapper(\n request.FILES[\"csv_file\"], encoding=importer.encoding\n ),\n include_reviews,\n privacy,\n )\n except (UnicodeDecodeError, ValueError, KeyError):\n return HttpResponseBadRequest(_(\"Not a valid csv file\"))\n\n importer.start_import(job)\n\n return redirect(\"/import/%d\" % job.id)\n return HttpResponseBadRequest()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportStatus(View):\n \"\"\"status of an existing import\"\"\"\n\n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n\n try:\n task = app.AsyncResult(job.task_id)\n # triggers 
attribute error if the task won't load\n task.status # pylint: disable=pointless-statement\n except (ValueError, AttributeError):\n task = None\n\n items = job.items.order_by(\"index\").all()\n failed_items = [i for i in items if i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n \"import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n\n def post(self, request, job_id):\n \"\"\"retry lines from an import\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n items = []\n for item in request.POST.getlist(\"import_item\"):\n items.append(get_object_or_404(models.ImportItem, id=item))\n\n importer = Importer()\n job = importer.create_retry_job(\n request.user,\n job,\n items,\n )\n importer.start_import(job)\n return redirect(\"/import/%d\" % job.id)\n", "path": "bookwyrm/views/import_data.py"}], "after_files": [{"content": "\"\"\" import books from another app \"\"\"\nfrom io import TextIOWrapper\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.importers import (\n Importer,\n LibrarythingImporter,\n GoodreadsImporter,\n StorygraphImporter,\n)\nfrom bookwyrm.tasks import app\n\n# pylint: disable= no-self-use\n@method_decorator(login_required, name=\"dispatch\")\nclass Import(View):\n \"\"\"import view\"\"\"\n\n def get(self, request):\n \"\"\"load import page\"\"\"\n return TemplateResponse(\n request,\n \"import/import.html\",\n {\n \"import_form\": forms.ImportForm(),\n \"jobs\": models.ImportJob.objects.filter(user=request.user).order_by(\n \"-created_date\"\n ),\n },\n )\n\n def post(self, request):\n \"\"\"ingest a goodreads csv\"\"\"\n form = forms.ImportForm(request.POST, request.FILES)\n if form.is_valid():\n include_reviews = request.POST.get(\"include_reviews\") == \"on\"\n privacy = request.POST.get(\"privacy\")\n source = request.POST.get(\"source\")\n\n importer = None\n if source == \"LibraryThing\":\n importer = LibrarythingImporter()\n elif source == \"Storygraph\":\n importer = StorygraphImporter()\n else:\n # Default : GoodReads\n importer = GoodreadsImporter()\n\n try:\n job = importer.create_job(\n request.user,\n TextIOWrapper(\n request.FILES[\"csv_file\"], encoding=importer.encoding\n ),\n include_reviews,\n privacy,\n )\n except (UnicodeDecodeError, ValueError, KeyError):\n return HttpResponseBadRequest(_(\"Not a valid csv file\"))\n\n importer.start_import(job)\n\n return redirect(\"/import/%d\" % job.id)\n return HttpResponseBadRequest()\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass ImportStatus(View):\n \"\"\"status of an existing import\"\"\"\n\n def get(self, request, job_id):\n \"\"\"status of an import job\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n if job.user != request.user:\n raise PermissionDenied\n\n try:\n task = app.AsyncResult(job.task_id)\n # triggers attribute error if the task won't load\n task.status # pylint: disable=pointless-statement\n except (ValueError, AttributeError):\n task = None\n\n items = job.items.order_by(\"index\").all()\n failed_items = [i for i in items if 
i.fail_reason]\n items = [i for i in items if not i.fail_reason]\n return TemplateResponse(\n request,\n \"import/import_status.html\",\n {\"job\": job, \"items\": items, \"failed_items\": failed_items, \"task\": task},\n )\n\n def post(self, request, job_id):\n \"\"\"retry lines from an import\"\"\"\n job = get_object_or_404(models.ImportJob, id=job_id)\n items = []\n for item in request.POST.getlist(\"import_item\"):\n items.append(get_object_or_404(models.ImportItem, id=item))\n\n importer = Importer()\n job = importer.create_retry_job(\n request.user,\n job,\n items,\n )\n importer.start_import(job)\n return redirect(\"/import/%d\" % job.id)\n", "path": "bookwyrm/views/import_data.py"}]} | 1,366 | 181 |
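The bookwyrm patch above only changes which template names the views render (`import/import.html` and `import/import_status.html`). A quick, hypothetical way to confirm the renamed templates resolve — this is not from the bookwyrm test suite and assumes Django settings are already configured — is to ask the template loader for them directly:

```python
# Hypothetical check, not bookwyrm code: fail fast if a template path is wrong.
from django.template.loader import get_template

for name in ("import/import.html", "import/import_status.html"):
    get_template(name)  # raises TemplateDoesNotExist on a bad path
    print("template resolved:", name)
```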
gh_patches_debug_35754 | rasdani/github-patches | git_diff | beetbox__beets-1595 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plexupdate: Doesn't work with libraries not named "Music"
I've named my music libraries `Music (New)` and `Music (Untagged)`. The Plex update plugin should update the `Music (New)` section, but instead of updating at least both music libraries it doesn't update anything. If I change the library name from `Music (New)` to `Music`, it works like a charm. The name is hard-coded on line 33 of the plugin. A config option for libraries named something other than `Music` would make sense.
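For illustration, a minimal sketch of the parameterised lookup being requested — the function and argument names here are assumptions, not code from the beets plugin:

```python
# Illustrative sketch only: match a configurable library name instead of 'Music'.
def get_section_key(tree, library_name="Music"):
    """Return the Plex section key whose title matches library_name."""
    for child in tree.findall('Directory'):
        if child.get('title') == library_name:
            return child.get('key')
    return None
```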
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/plexupdate.py`
Content:
```
1 """Updates an Plex library whenever the beets library is changed.
2
3 Plex Home users enter the Plex Token to enable updating.
4 Put something like the following in your config.yaml to configure:
5 plex:
6 host: localhost
7 port: 32400
8 token: token
9 """
10 from __future__ import (division, absolute_import, print_function,
11 unicode_literals)
12
13 import requests
14 from urlparse import urljoin
15 from urllib import urlencode
16 import xml.etree.ElementTree as ET
17 from beets import config
18 from beets.plugins import BeetsPlugin
19
20
21 def get_music_section(host, port, token):
22 """Getting the section key for the music library in Plex.
23 """
24 api_endpoint = append_token('library/sections', token)
25 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
26
27 # Sends request.
28 r = requests.get(url)
29
30 # Parse xml tree and extract music section key.
31 tree = ET.fromstring(r.text)
32 for child in tree.findall('Directory'):
33 if child.get('title') == 'Music':
34 return child.get('key')
35
36
37 def update_plex(host, port, token):
38 """Sends request to the Plex api to start a library refresh.
39 """
40 # Getting section key and build url.
41 section_key = get_music_section(host, port, token)
42 api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
43 api_endpoint = append_token(api_endpoint, token)
44 url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
45
46 # Sends request and returns requests object.
47 r = requests.get(url)
48 return r
49
50
51 def append_token(url, token):
52 """Appends the Plex Home token to the api call if required.
53 """
54 if token:
55 url += '?' + urlencode({'X-Plex-Token': token})
56 return url
57
58
59 class PlexUpdate(BeetsPlugin):
60 def __init__(self):
61 super(PlexUpdate, self).__init__()
62
63 # Adding defaults.
64 config['plex'].add({
65 u'host': u'localhost',
66 u'port': 32400,
67 u'token': u''})
68
69 self.register_listener('database_change', self.listen_for_db_change)
70
71 def listen_for_db_change(self, lib, model):
72 """Listens for beets db change and register the update for the end"""
73 self.register_listener('cli_exit', self.update)
74
75 def update(self, lib):
76 """When the client exists try to send refresh request to Plex server.
77 """
78 self._log.info('Updating Plex library...')
79
80 # Try to send update request.
81 try:
82 update_plex(
83 config['plex']['host'].get(),
84 config['plex']['port'].get(),
85 config['plex']['token'].get())
86 self._log.info('... started.')
87
88 except requests.exceptions.RequestException:
89 self._log.warning('Update failed.')
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py
--- a/beetsplug/plexupdate.py
+++ b/beetsplug/plexupdate.py
@@ -18,7 +18,7 @@
from beets.plugins import BeetsPlugin
-def get_music_section(host, port, token):
+def get_music_section(host, port, token, library_name):
"""Getting the section key for the music library in Plex.
"""
api_endpoint = append_token('library/sections', token)
@@ -30,15 +30,15 @@
# Parse xml tree and extract music section key.
tree = ET.fromstring(r.text)
for child in tree.findall('Directory'):
- if child.get('title') == 'Music':
+ if child.get('title') == library_name:
return child.get('key')
-def update_plex(host, port, token):
+def update_plex(host, port, token, library_name):
"""Sends request to the Plex api to start a library refresh.
"""
# Getting section key and build url.
- section_key = get_music_section(host, port, token)
+ section_key = get_music_section(host, port, token, library_name)
api_endpoint = 'library/sections/{0}/refresh'.format(section_key)
api_endpoint = append_token(api_endpoint, token)
url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)
@@ -64,7 +64,8 @@
config['plex'].add({
u'host': u'localhost',
u'port': 32400,
- u'token': u''})
+ u'token': u'',
+ u'library_name': u'Music'})
self.register_listener('database_change', self.listen_for_db_change)
@@ -82,7 +83,8 @@
update_plex(
config['plex']['host'].get(),
config['plex']['port'].get(),
- config['plex']['token'].get())
+ config['plex']['token'].get(),
+ config['plex']['library_name'].get())
self._log.info('... started.')
except requests.exceptions.RequestException:
| {"golden_diff": "diff --git a/beetsplug/plexupdate.py b/beetsplug/plexupdate.py\n--- a/beetsplug/plexupdate.py\n+++ b/beetsplug/plexupdate.py\n@@ -18,7 +18,7 @@\n from beets.plugins import BeetsPlugin\n \n \n-def get_music_section(host, port, token):\n+def get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n@@ -30,15 +30,15 @@\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n- if child.get('title') == 'Music':\n+ if child.get('title') == library_name:\n return child.get('key')\n \n \n-def update_plex(host, port, token):\n+def update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n- section_key = get_music_section(host, port, token)\n+ section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n@@ -64,7 +64,8 @@\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n- u'token': u''})\n+ u'token': u'',\n+ u'library_name': u'Music'})\n \n self.register_listener('database_change', self.listen_for_db_change)\n \n@@ -82,7 +83,8 @@\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n- config['plex']['token'].get())\n+ config['plex']['token'].get(),\n+ config['plex']['library_name'].get())\n self._log.info('... started.')\n \n except requests.exceptions.RequestException:\n", "issue": "plexupdate: Doesn't work with libaries not named \"Music\"\nI've named my music libaries `Music (New)` and `Music (Untagged)`. The plex update plugin should update the `Music (New)` section, but instead of updating at least both music libaries it doesn't update anything. If I change the library name from `Music (New)` to `Music` it works like a charm. This is specified on line 33 of the beets plugin. 
A config option to add libraries other than `Music` would make sense imo.\n\n", "before_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n if child.get('title') == 'Music':\n return child.get('key')\n\n\ndef update_plex(host, port, token):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n url += '?' + urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u''})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get())\n self._log.info('... 
started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}], "after_files": [{"content": "\"\"\"Updates an Plex library whenever the beets library is changed.\n\nPlex Home users enter the Plex Token to enable updating.\nPut something like the following in your config.yaml to configure:\n plex:\n host: localhost\n port: 32400\n token: token\n\"\"\"\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport requests\nfrom urlparse import urljoin\nfrom urllib import urlencode\nimport xml.etree.ElementTree as ET\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef get_music_section(host, port, token, library_name):\n \"\"\"Getting the section key for the music library in Plex.\n \"\"\"\n api_endpoint = append_token('library/sections', token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request.\n r = requests.get(url)\n\n # Parse xml tree and extract music section key.\n tree = ET.fromstring(r.text)\n for child in tree.findall('Directory'):\n if child.get('title') == library_name:\n return child.get('key')\n\n\ndef update_plex(host, port, token, library_name):\n \"\"\"Sends request to the Plex api to start a library refresh.\n \"\"\"\n # Getting section key and build url.\n section_key = get_music_section(host, port, token, library_name)\n api_endpoint = 'library/sections/{0}/refresh'.format(section_key)\n api_endpoint = append_token(api_endpoint, token)\n url = urljoin('http://{0}:{1}'.format(host, port), api_endpoint)\n\n # Sends request and returns requests object.\n r = requests.get(url)\n return r\n\n\ndef append_token(url, token):\n \"\"\"Appends the Plex Home token to the api call if required.\n \"\"\"\n if token:\n url += '?' + urlencode({'X-Plex-Token': token})\n return url\n\n\nclass PlexUpdate(BeetsPlugin):\n def __init__(self):\n super(PlexUpdate, self).__init__()\n\n # Adding defaults.\n config['plex'].add({\n u'host': u'localhost',\n u'port': 32400,\n u'token': u'',\n u'library_name': u'Music'})\n\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update for the end\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Plex server.\n \"\"\"\n self._log.info('Updating Plex library...')\n\n # Try to send update request.\n try:\n update_plex(\n config['plex']['host'].get(),\n config['plex']['port'].get(),\n config['plex']['token'].get(),\n config['plex']['library_name'].get())\n self._log.info('... started.')\n\n except requests.exceptions.RequestException:\n self._log.warning('Update failed.')\n", "path": "beetsplug/plexupdate.py"}]} | 1,187 | 486 |
gh_patches_debug_21909 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2512 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Strawberry cli commands fail with error: strawberry.exe\__main__.py not found
After upgrading strawberry to latest version (0.154.1), I am unable to run any strawberry cli commands.
## Describe the Bug
- Upgraded strawberry from 0.152.0 to 0.154.1
```
poetry add strawberry-graphql[debug-server]@0.154.1
```
- Executed below commands:
```
strawberry server myapp.schema
strawberry export-schema myapp.schema:schema
```
- Both these commands are failing in below error:
**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\myyuser\\AppData\\Local\\pypoetry\\Cache\\virtualenvs\\straw-k47ybk7v-py3.10\\Scripts\\strawberry.exe\\\_\_main\_\_.py'**
## System Information
- Operating system: Windows 10
- Strawberry version (if applicable): 0.154.1
- Python: 3.10.9
## Additional Context
There is no issue with strawberry cli in version 0.152.0 which I am using currently. If we downgrade the package to this version, cli commands work just fine.
--- END ISSUE ---
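For context on the traceback above: `pathlib.Path.samefile()` stats both paths, so when the `__main__` path points inside the `strawberry.exe` console script and does not exist on disk it raises `FileNotFoundError` instead of returning `False`. A minimal sketch of that behaviour, with a hypothetical path standing in for the real one:
```python
from pathlib import Path

# Hypothetical path mirroring the traceback; it does not exist on disk,
# so samefile() raises FileNotFoundError rather than returning False.
missing = Path(r"C:\venv\Scripts\strawberry.exe\__main__.py")
try:
    missing.samefile(Path(__file__))
except FileNotFoundError as exc:
    print("samefile raised:", exc)
```
This is why the fix later in this entry guards the `samefile()` call with a try/except.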
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/lazy_type.py`
Content:
```
1 import importlib
2 import inspect
3 import sys
4 import warnings
5 from dataclasses import dataclass
6 from pathlib import Path
7 from typing import ForwardRef, Generic, Optional, Type, TypeVar, cast
8
9 TypeName = TypeVar("TypeName")
10 Module = TypeVar("Module")
11
12
13 @dataclass(frozen=True)
14 class LazyType(Generic[TypeName, Module]):
15 type_name: str
16 module: str
17 package: Optional[str] = None
18
19 def __class_getitem__(cls, params):
20 warnings.warn(
21 (
22 "LazyType is deprecated, use "
23 "Annotated[YourType, strawberry.lazy(path)] instead"
24 ),
25 DeprecationWarning,
26 stacklevel=2,
27 )
28
29 type_name, module = params
30
31 package = None
32
33 if module.startswith("."):
34 current_frame = inspect.currentframe()
35 assert current_frame is not None
36 assert current_frame.f_back is not None
37 package = current_frame.f_back.f_globals["__package__"]
38
39 return cls(type_name, module, package)
40
41 def resolve_type(self) -> Type:
42 module = importlib.import_module(self.module, self.package)
43 main_module = sys.modules.get("__main__", None)
44 if main_module:
45 # If lazy type points to the main module, use it instead of the imported
46 # module. Otherwise duplication checks during schema-conversion might fail.
47 # Refer to: https://github.com/strawberry-graphql/strawberry/issues/2397
48 if main_module.__spec__ and main_module.__spec__.name == self.module:
49 module = main_module
50 elif hasattr(main_module, "__file__") and hasattr(module, "__file__"):
51 if (
52 main_module.__file__
53 and module.__file__
54 and Path(main_module.__file__).samefile(module.__file__)
55 ):
56 module = main_module
57 return module.__dict__[self.type_name]
58
59 # this empty call method allows LazyTypes to be used in generic types
60 # for example: List[LazyType["A", "module"]]
61
62 def __call__(self): # pragma: no cover
63 return None
64
65
66 class StrawberryLazyReference:
67 def __init__(self, module: str) -> None:
68 self.module = module
69 self.package = None
70
71 if module.startswith("."):
72 frame = inspect.stack()[2][0]
73 # TODO: raise a nice error if frame is None
74 assert frame is not None
75 self.package = cast(str, frame.f_globals["__package__"])
76
77 def resolve_forward_ref(self, forward_ref: ForwardRef) -> LazyType:
78 return LazyType(forward_ref.__forward_arg__, self.module, self.package)
79
80
81 def lazy(module_path: str) -> StrawberryLazyReference:
82 return StrawberryLazyReference(module_path)
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/lazy_type.py b/strawberry/lazy_type.py
--- a/strawberry/lazy_type.py
+++ b/strawberry/lazy_type.py
@@ -48,12 +48,16 @@
if main_module.__spec__ and main_module.__spec__.name == self.module:
module = main_module
elif hasattr(main_module, "__file__") and hasattr(module, "__file__"):
- if (
- main_module.__file__
- and module.__file__
- and Path(main_module.__file__).samefile(module.__file__)
- ):
- module = main_module
+ main_file = main_module.__file__
+ module_file = module.__file__
+ if main_file and module_file:
+ try:
+ is_samefile = Path(main_file).samefile(module_file)
+ except FileNotFoundError:
+ # Can be raised when run through the CLI as the __main__ file
+ # path contains `strawberry.exe`
+ is_samefile = False
+ module = main_module if is_samefile else module
return module.__dict__[self.type_name]
# this empty call method allows LazyTypes to be used in generic types
| {"golden_diff": "diff --git a/strawberry/lazy_type.py b/strawberry/lazy_type.py\n--- a/strawberry/lazy_type.py\n+++ b/strawberry/lazy_type.py\n@@ -48,12 +48,16 @@\n if main_module.__spec__ and main_module.__spec__.name == self.module:\n module = main_module\n elif hasattr(main_module, \"__file__\") and hasattr(module, \"__file__\"):\n- if (\n- main_module.__file__\n- and module.__file__\n- and Path(main_module.__file__).samefile(module.__file__)\n- ):\n- module = main_module\n+ main_file = main_module.__file__\n+ module_file = module.__file__\n+ if main_file and module_file:\n+ try:\n+ is_samefile = Path(main_file).samefile(module_file)\n+ except FileNotFoundError:\n+ # Can be raised when run through the CLI as the __main__ file\n+ # path contains `strawberry.exe`\n+ is_samefile = False\n+ module = main_module if is_samefile else module\n return module.__dict__[self.type_name]\n \n # this empty call method allows LazyTypes to be used in generic types\n", "issue": "Strawberry cli commands fail with error: strawberry.exe\\__main__.py not found\nAfter upgrading strawberry to latest version (0.154.1), I am unable to run any strawberry cli commands.\r\n\r\n## Describe the Bug\r\n- Upgraded strawberry from 0.152.0 to 0.154.1\r\n```\r\npoetry add strawberry-graphql[debug-server]@0.154.1\r\n```\r\n- Executed below commands:\r\n```\r\nstrawberry server myapp.schema\r\nstrawberry export-schema myapp.schema:schema\r\n```\r\n- Both these commands are failing in below error:\r\n\r\n**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\myyuser\\\\AppData\\\\Local\\\\pypoetry\\\\Cache\\\\virtualenvs\\\\straw-k47ybk7v-py3.10\\\\Scripts\\\\strawberry.exe\\\\\\_\\_main\\_\\_.py'**\r\n\r\n## System Information\r\n\r\n - Operating system: Windows 10\r\n - Strawberry version (if applicable): 0.154.1\r\n - Python: 3.10.9\r\n\r\n## Additional Context\r\n\r\nThere is no issue with strawberry cli in version 0.152.0 which I am using currently. If we downgrade the package to this version, cli commands work just fine.\r\n\nStrawberry cli commands fail with error: strawberry.exe\\__main__.py not found\nAfter upgrading strawberry to latest version (0.154.1), I am unable to run any strawberry cli commands.\r\n\r\n## Describe the Bug\r\n- Upgraded strawberry from 0.152.0 to 0.154.1\r\n```\r\npoetry add strawberry-graphql[debug-server]@0.154.1\r\n```\r\n- Executed below commands:\r\n```\r\nstrawberry server myapp.schema\r\nstrawberry export-schema myapp.schema:schema\r\n```\r\n- Both these commands are failing in below error:\r\n\r\n**FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\myyuser\\\\AppData\\\\Local\\\\pypoetry\\\\Cache\\\\virtualenvs\\\\straw-k47ybk7v-py3.10\\\\Scripts\\\\strawberry.exe\\\\\\_\\_main\\_\\_.py'**\r\n\r\n## System Information\r\n\r\n - Operating system: Windows 10\r\n - Strawberry version (if applicable): 0.154.1\r\n - Python: 3.10.9\r\n\r\n## Additional Context\r\n\r\nThere is no issue with strawberry cli in version 0.152.0 which I am using currently. 
If we downgrade the package to this version, cli commands work just fine.\r\n\n", "before_files": [{"content": "import importlib\nimport inspect\nimport sys\nimport warnings\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import ForwardRef, Generic, Optional, Type, TypeVar, cast\n\nTypeName = TypeVar(\"TypeName\")\nModule = TypeVar(\"Module\")\n\n\n@dataclass(frozen=True)\nclass LazyType(Generic[TypeName, Module]):\n type_name: str\n module: str\n package: Optional[str] = None\n\n def __class_getitem__(cls, params):\n warnings.warn(\n (\n \"LazyType is deprecated, use \"\n \"Annotated[YourType, strawberry.lazy(path)] instead\"\n ),\n DeprecationWarning,\n stacklevel=2,\n )\n\n type_name, module = params\n\n package = None\n\n if module.startswith(\".\"):\n current_frame = inspect.currentframe()\n assert current_frame is not None\n assert current_frame.f_back is not None\n package = current_frame.f_back.f_globals[\"__package__\"]\n\n return cls(type_name, module, package)\n\n def resolve_type(self) -> Type:\n module = importlib.import_module(self.module, self.package)\n main_module = sys.modules.get(\"__main__\", None)\n if main_module:\n # If lazy type points to the main module, use it instead of the imported\n # module. Otherwise duplication checks during schema-conversion might fail.\n # Refer to: https://github.com/strawberry-graphql/strawberry/issues/2397\n if main_module.__spec__ and main_module.__spec__.name == self.module:\n module = main_module\n elif hasattr(main_module, \"__file__\") and hasattr(module, \"__file__\"):\n if (\n main_module.__file__\n and module.__file__\n and Path(main_module.__file__).samefile(module.__file__)\n ):\n module = main_module\n return module.__dict__[self.type_name]\n\n # this empty call method allows LazyTypes to be used in generic types\n # for example: List[LazyType[\"A\", \"module\"]]\n\n def __call__(self): # pragma: no cover\n return None\n\n\nclass StrawberryLazyReference:\n def __init__(self, module: str) -> None:\n self.module = module\n self.package = None\n\n if module.startswith(\".\"):\n frame = inspect.stack()[2][0]\n # TODO: raise a nice error if frame is None\n assert frame is not None\n self.package = cast(str, frame.f_globals[\"__package__\"])\n\n def resolve_forward_ref(self, forward_ref: ForwardRef) -> LazyType:\n return LazyType(forward_ref.__forward_arg__, self.module, self.package)\n\n\ndef lazy(module_path: str) -> StrawberryLazyReference:\n return StrawberryLazyReference(module_path)\n", "path": "strawberry/lazy_type.py"}], "after_files": [{"content": "import importlib\nimport inspect\nimport sys\nimport warnings\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import ForwardRef, Generic, Optional, Type, TypeVar, cast\n\nTypeName = TypeVar(\"TypeName\")\nModule = TypeVar(\"Module\")\n\n\n@dataclass(frozen=True)\nclass LazyType(Generic[TypeName, Module]):\n type_name: str\n module: str\n package: Optional[str] = None\n\n def __class_getitem__(cls, params):\n warnings.warn(\n (\n \"LazyType is deprecated, use \"\n \"Annotated[YourType, strawberry.lazy(path)] instead\"\n ),\n DeprecationWarning,\n stacklevel=2,\n )\n\n type_name, module = params\n\n package = None\n\n if module.startswith(\".\"):\n current_frame = inspect.currentframe()\n assert current_frame is not None\n assert current_frame.f_back is not None\n package = current_frame.f_back.f_globals[\"__package__\"]\n\n return cls(type_name, module, package)\n\n def resolve_type(self) -> Type:\n module = 
importlib.import_module(self.module, self.package)\n main_module = sys.modules.get(\"__main__\", None)\n if main_module:\n # If lazy type points to the main module, use it instead of the imported\n # module. Otherwise duplication checks during schema-conversion might fail.\n # Refer to: https://github.com/strawberry-graphql/strawberry/issues/2397\n if main_module.__spec__ and main_module.__spec__.name == self.module:\n module = main_module\n elif hasattr(main_module, \"__file__\") and hasattr(module, \"__file__\"):\n main_file = main_module.__file__\n module_file = module.__file__\n if main_file and module_file:\n try:\n is_samefile = Path(main_file).samefile(module_file)\n except FileNotFoundError:\n # Can be raised when run through the CLI as the __main__ file\n # path contains `strawberry.exe`\n is_samefile = False\n module = main_module if is_samefile else module\n return module.__dict__[self.type_name]\n\n # this empty call method allows LazyTypes to be used in generic types\n # for example: List[LazyType[\"A\", \"module\"]]\n\n def __call__(self): # pragma: no cover\n return None\n\n\nclass StrawberryLazyReference:\n def __init__(self, module: str) -> None:\n self.module = module\n self.package = None\n\n if module.startswith(\".\"):\n frame = inspect.stack()[2][0]\n # TODO: raise a nice error if frame is None\n assert frame is not None\n self.package = cast(str, frame.f_globals[\"__package__\"])\n\n def resolve_forward_ref(self, forward_ref: ForwardRef) -> LazyType:\n return LazyType(forward_ref.__forward_arg__, self.module, self.package)\n\n\ndef lazy(module_path: str) -> StrawberryLazyReference:\n return StrawberryLazyReference(module_path)\n", "path": "strawberry/lazy_type.py"}]} | 1,610 | 272 |
gh_patches_debug_31697 | rasdani/github-patches | git_diff | conda-forge__conda-smithy-240 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Don't use Exception.message if you want to work on py3k
Found in https://github.com/conda-forge/staged-recipes/issues/720. Currently exists in 2 places at https://github.com/conda-forge/conda-smithy/blob/6c1856a51832b5a014bb9126467804d75d712029/conda_smithy/cli.py.
--- END ISSUE ---
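For context: `BaseException.message` was deprecated in Python 2.6 and removed in Python 3, so `e.message` raises `AttributeError` there, while printing the exception object itself works on both. A minimal illustration (not conda-smithy code):
```python
# Python 3 exception objects have no .message attribute; print the
# exception itself to get its message portably.
try:
    raise RuntimeError("Repository registration failed")
except RuntimeError as e:
    print(e)            # works on Python 2 and 3
    # print(e.message)  # AttributeError on Python 3
```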
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda_smithy/cli.py`
Content:
```
1 from __future__ import print_function, absolute_import
2
3 import os
4 import requests
5 import subprocess
6 import sys
7 import time
8 import argparse
9
10 from conda_build.metadata import MetaData
11
12 from . import ci_register
13 from . import configure_feedstock
14 from . import lint_recipe
15 from . import __version__
16
17
18 PY2 = sys.version_info[0] == 2
19
20 def generate_feedstock_content(target_directory, source_recipe_dir, meta):
21 recipe_dir = "recipe"
22 target_recipe_dir = os.path.join(target_directory, recipe_dir)
23 if not os.path.exists(target_recipe_dir):
24 os.makedirs(target_recipe_dir)
25 # If there is a source recipe, copy it now to the right dir
26 if source_recipe_dir:
27 configure_feedstock.copytree(source_recipe_dir, target_recipe_dir)
28
29 forge_yml = os.path.join(target_directory, 'conda-forge.yml')
30 if not os.path.exists(forge_yml):
31 with open(forge_yml, 'w') as fh:
32 fh.write('[]')
33
34 configure_feedstock.main(target_directory)
35
36
37 def init_git_repo(target):
38 subprocess.check_call(['git', 'init'], cwd=target)
39
40
41 def create_git_repo(target, msg):
42 init_git_repo(target)
43 subprocess.check_call(['git', 'add', '*'], cwd=target)
44 if sys.platform == "win32":
45 # prevent this:
46 # bash: line 1: ./ci_support/run_docker_build.sh: Permission denied
47 # ./ci_support/run_docker_build.sh returned exit code 126
48 subprocess.check_call(['git', 'update-index', '--chmod=+x', 'ci_support/run_docker_build.sh'], cwd=target)
49 subprocess.check_call(['git', 'commit', '-m', msg], cwd=target)
50
51
52 class Subcommand(object):
53 #: The name of the subcommand
54 subcommand = None
55 aliases = []
56 def __init__(self, parser, help=None):
57 if PY2:
58 # aliases not allowed in 2.7 :-(
59 subcommand_parser = parser.add_parser(self.subcommand, help=help)
60 else:
61 subcommand_parser = parser.add_parser(self.subcommand, help=help, aliases=self.aliases)
62
63 subcommand_parser.set_defaults(subcommand_func=self)
64 self.subcommand_parser = subcommand_parser
65
66 def __call__(self, args):
67 pass
68
69
70 class Init(Subcommand):
71 subcommand = 'init'
72 def __init__(self, parser):
73 # conda-smithy init /path/to/udunits-recipe ./
74
75 super(Init, self).__init__(parser, "Create a feedstock git repository, which can contain "
76 "one conda recipes.")
77 scp = self.subcommand_parser
78 scp.add_argument("recipe_directory", help="The path to the source recipe directory.")
79 scp.add_argument("--feedstock-directory", default='./{package.name}-feedstock',
80 help="Target directory, where the new feedstock git repository should be "
81 "created. (Default: './<packagename>-feedstock')")
82 scp.add_argument("--no-git-repo", action='store_true',
83 default=False,
84 help="Do not init the feedstock as a git repository.")
85
86 def __call__(self, args):
87 # check some error conditions
88 if args.recipe_directory and not os.path.isdir(args.recipe_directory):
89 raise IOError("The source recipe directory should be the directory of the "
90 "conda-recipe you want to build a feedstock for. Got {}".format(
91 args.recipe_directory))
92
93 # Get some information about the source recipe.
94 if args.recipe_directory:
95 meta = MetaData(args.recipe_directory)
96 else:
97 meta = None
98
99 feedstock_directory = args.feedstock_directory.format(package=argparse.Namespace(name=meta.name()))
100 msg = 'Initial commit of the {} feedstock.'.format(meta.name())
101
102 try:
103 generate_feedstock_content(feedstock_directory, args.recipe_directory, meta)
104 if not args.no_git_repo:
105 create_git_repo(feedstock_directory, msg)
106
107 print("\nRepository created, please edit conda-forge.yml to configure the upload channels\n"
108 "and afterwards call 'conda smithy register-github'")
109 except RuntimeError as e:
110 print(e.message)
111
112
113 class RegisterGithub(Subcommand):
114 subcommand = 'register-github'
115 def __init__(self, parser):
116 # conda-smithy register-github ./ --organization=conda-forge
117 super(RegisterGithub, self).__init__(parser, "Register a repo for a feedstock at github.")
118 scp = self.subcommand_parser
119 scp.add_argument("feedstock_directory",
120 help="The directory of the feedstock git repository.")
121 group = scp.add_mutually_exclusive_group()
122 group.add_argument("--user", help="github username under which to register this repo")
123 group.add_argument("--organization", default="conda-forge",
124 help="github organisation under which to register this repo")
125 scp.add_argument("--remote-name", default="upstream",
126 help="The name of the remote to add to the local repo (default: upstream). "
127 "An empty string will disable adding of a remote.")
128
129 def __call__(self, args):
130 from . import github
131 try:
132 github.create_github_repo(args)
133 print("\nRepository registered at github, now call 'conda smithy register-ci'")
134 except RuntimeError as e:
135 print(e.message)
136
137
138 class RegisterCI(Subcommand):
139 subcommand = 'register-ci'
140 def __init__(self, parser):
141 # conda-smithy register-ci ./
142 super(RegisterCI, self).__init__(parser, "Register a feedstock at the CI "
143 "services which do the builds.")
144 scp = self.subcommand_parser
145 scp.add_argument("--feedstock_directory", default=os.getcwd(),
146 help="The directory of the feedstock git repository.")
147 group = scp.add_mutually_exclusive_group()
148 group.add_argument("--user", help="github username under which to register this repo")
149 group.add_argument("--organization", default="conda-forge",
150 help="github organisation under which to register this repo")
151
152 def __call__(self, args):
153 owner = args.user or args.organization
154 repo = os.path.basename(os.path.abspath(args.feedstock_directory))
155
156 print('CI Summary for {}/{} (can take ~30s):'.format(owner, repo))
157 try:
158 ci_register.add_project_to_travis(owner, repo)
159 ci_register.travis_token_update_conda_forge_config(args.feedstock_directory, owner, repo)
160 time.sleep(1)
161 ci_register.travis_configure(owner, repo)
162 ci_register.add_project_to_circle(owner, repo)
163 ci_register.add_token_to_circle(owner, repo)
164 ci_register.add_project_to_appveyor(owner, repo)
165 ci_register.appveyor_encrypt_binstar_token(args.feedstock_directory, owner, repo)
166 ci_register.appveyor_configure(owner, repo)
167 ci_register.add_conda_linting(owner, repo)
168 print("\nCI services have been enabled enabled. You may wish to regnerate the feedstock.\n"
169 "Any changes will need commiting to the repo.")
170 except RuntimeError as e:
171 print(e.message)
172
173 class Regenerate(Subcommand):
174 subcommand = 'regenerate'
175 aliases = ['rerender']
176 def __init__(self, parser):
177 super(Regenerate, self).__init__(parser, "Regenerate / update the CI support files of the "
178 "feedstock.")
179 scp = self.subcommand_parser
180 scp.add_argument("--feedstock_directory", default=os.getcwd(),
181 help="The directory of the feedstock git repository.")
182
183 def __call__(self, args):
184 try:
185 configure_feedstock.main(args.feedstock_directory)
186 print("\nCI support files regenerated. These need to be pushed to github!")
187 except RuntimeError as e:
188 print(e.message)
189
190
191 class RecipeLint(Subcommand):
192 subcommand = 'recipe-lint'
193 def __init__(self, parser):
194 super(RecipeLint, self).__init__(parser, "Lint a single conda recipe.")
195 scp = self.subcommand_parser
196 scp.add_argument("recipe_directory", default=[os.getcwd()], nargs='*')
197
198 def __call__(self, args):
199 all_good = True
200 for recipe in args.recipe_directory:
201 lint = lint_recipe.main(os.path.join(recipe))
202 if lint:
203 all_good = False
204 print('{} has some lint:\n {}'.format(recipe, '\n '.join(lint)))
205 else:
206 print('{} is in fine form'.format(recipe))
207 # Exit code 1 for some lint, 0 for no lint.
208 sys.exit(int(not all_good))
209
210
211
212 def main():
213
214 parser = argparse.ArgumentParser("a tool to help create, administer and manage feedstocks.")
215 subparser = parser.add_subparsers()
216 # TODO: Consider allowing plugins/extensions using entry_points.
217 # http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html
218 for subcommand in Subcommand.__subclasses__():
219 subcommand(subparser)
220 # And the alias for rerender
221 if PY2:
222 class Rerender(Regenerate):
223 # A poor-man's alias for regenerate.
224 subcommand = 'rerender'
225 Rerender(subparser)
226
227 parser.add_argument('--version', action='version',
228 version=__version__,
229 help="Show conda-smithy's version, and exit.")
230
231 if not sys.argv[1:]:
232 args = parser.parse_args(['--help'])
233 else:
234 args = parser.parse_args()
235
236 args.subcommand_func(args)
237
238
239 if __name__ == '__main__':
240 main()
241
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda_smithy/cli.py b/conda_smithy/cli.py
--- a/conda_smithy/cli.py
+++ b/conda_smithy/cli.py
@@ -107,7 +107,7 @@
print("\nRepository created, please edit conda-forge.yml to configure the upload channels\n"
"and afterwards call 'conda smithy register-github'")
except RuntimeError as e:
- print(e.message)
+ print(e)
class RegisterGithub(Subcommand):
@@ -132,7 +132,7 @@
github.create_github_repo(args)
print("\nRepository registered at github, now call 'conda smithy register-ci'")
except RuntimeError as e:
- print(e.message)
+ print(e)
class RegisterCI(Subcommand):
@@ -168,7 +168,7 @@
print("\nCI services have been enabled enabled. You may wish to regnerate the feedstock.\n"
"Any changes will need commiting to the repo.")
except RuntimeError as e:
- print(e.message)
+ print(e)
class Regenerate(Subcommand):
subcommand = 'regenerate'
@@ -185,7 +185,7 @@
configure_feedstock.main(args.feedstock_directory)
print("\nCI support files regenerated. These need to be pushed to github!")
except RuntimeError as e:
- print(e.message)
+ print(e)
class RecipeLint(Subcommand):
| {"golden_diff": "diff --git a/conda_smithy/cli.py b/conda_smithy/cli.py\n--- a/conda_smithy/cli.py\n+++ b/conda_smithy/cli.py\n@@ -107,7 +107,7 @@\n print(\"\\nRepository created, please edit conda-forge.yml to configure the upload channels\\n\"\n \"and afterwards call 'conda smithy register-github'\")\n except RuntimeError as e:\n- print(e.message)\n+ print(e)\n \n \n class RegisterGithub(Subcommand):\n@@ -132,7 +132,7 @@\n github.create_github_repo(args)\n print(\"\\nRepository registered at github, now call 'conda smithy register-ci'\")\n except RuntimeError as e:\n- print(e.message)\n+ print(e)\n \n \n class RegisterCI(Subcommand):\n@@ -168,7 +168,7 @@\n print(\"\\nCI services have been enabled enabled. You may wish to regnerate the feedstock.\\n\"\n \"Any changes will need commiting to the repo.\")\n except RuntimeError as e:\n- print(e.message)\n+ print(e)\n \n class Regenerate(Subcommand):\n subcommand = 'regenerate'\n@@ -185,7 +185,7 @@\n configure_feedstock.main(args.feedstock_directory)\n print(\"\\nCI support files regenerated. These need to be pushed to github!\")\n except RuntimeError as e:\n- print(e.message)\n+ print(e)\n \n \n class RecipeLint(Subcommand):\n", "issue": "Don't use Exception.message if you want to work on py3k\nFound in https://github.com/conda-forge/staged-recipes/issues/720. Currently exists in 2 places at https://github.com/conda-forge/conda-smithy/blob/6c1856a51832b5a014bb9126467804d75d712029/conda_smithy/cli.py.\n\n", "before_files": [{"content": "from __future__ import print_function, absolute_import\n\nimport os\nimport requests\nimport subprocess\nimport sys\nimport time\nimport argparse\n\nfrom conda_build.metadata import MetaData\n\nfrom . import ci_register\nfrom . import configure_feedstock\nfrom . import lint_recipe\nfrom . 
import __version__\n\n\nPY2 = sys.version_info[0] == 2\n\ndef generate_feedstock_content(target_directory, source_recipe_dir, meta):\n recipe_dir = \"recipe\"\n target_recipe_dir = os.path.join(target_directory, recipe_dir)\n if not os.path.exists(target_recipe_dir):\n os.makedirs(target_recipe_dir)\n # If there is a source recipe, copy it now to the right dir\n if source_recipe_dir:\n configure_feedstock.copytree(source_recipe_dir, target_recipe_dir)\n\n forge_yml = os.path.join(target_directory, 'conda-forge.yml')\n if not os.path.exists(forge_yml):\n with open(forge_yml, 'w') as fh:\n fh.write('[]')\n\n configure_feedstock.main(target_directory)\n\n\ndef init_git_repo(target):\n subprocess.check_call(['git', 'init'], cwd=target)\n\n\ndef create_git_repo(target, msg):\n init_git_repo(target)\n subprocess.check_call(['git', 'add', '*'], cwd=target)\n if sys.platform == \"win32\":\n # prevent this:\n # bash: line 1: ./ci_support/run_docker_build.sh: Permission denied\n # ./ci_support/run_docker_build.sh returned exit code 126\n subprocess.check_call(['git', 'update-index', '--chmod=+x', 'ci_support/run_docker_build.sh'], cwd=target)\n subprocess.check_call(['git', 'commit', '-m', msg], cwd=target)\n\n\nclass Subcommand(object):\n #: The name of the subcommand\n subcommand = None\n aliases = []\n def __init__(self, parser, help=None):\n if PY2:\n # aliases not allowed in 2.7 :-(\n subcommand_parser = parser.add_parser(self.subcommand, help=help)\n else:\n subcommand_parser = parser.add_parser(self.subcommand, help=help, aliases=self.aliases)\n\n subcommand_parser.set_defaults(subcommand_func=self)\n self.subcommand_parser = subcommand_parser\n\n def __call__(self, args):\n pass\n\n\nclass Init(Subcommand):\n subcommand = 'init'\n def __init__(self, parser):\n # conda-smithy init /path/to/udunits-recipe ./\n\n super(Init, self).__init__(parser, \"Create a feedstock git repository, which can contain \"\n \"one conda recipes.\")\n scp = self.subcommand_parser\n scp.add_argument(\"recipe_directory\", help=\"The path to the source recipe directory.\")\n scp.add_argument(\"--feedstock-directory\", default='./{package.name}-feedstock',\n help=\"Target directory, where the new feedstock git repository should be \"\n \"created. (Default: './<packagename>-feedstock')\")\n scp.add_argument(\"--no-git-repo\", action='store_true',\n default=False,\n help=\"Do not init the feedstock as a git repository.\")\n\n def __call__(self, args):\n # check some error conditions\n if args.recipe_directory and not os.path.isdir(args.recipe_directory):\n raise IOError(\"The source recipe directory should be the directory of the \"\n \"conda-recipe you want to build a feedstock for. 
Got {}\".format(\n args.recipe_directory))\n\n # Get some information about the source recipe.\n if args.recipe_directory:\n meta = MetaData(args.recipe_directory)\n else:\n meta = None\n\n feedstock_directory = args.feedstock_directory.format(package=argparse.Namespace(name=meta.name()))\n msg = 'Initial commit of the {} feedstock.'.format(meta.name())\n\n try:\n generate_feedstock_content(feedstock_directory, args.recipe_directory, meta)\n if not args.no_git_repo:\n create_git_repo(feedstock_directory, msg)\n\n print(\"\\nRepository created, please edit conda-forge.yml to configure the upload channels\\n\"\n \"and afterwards call 'conda smithy register-github'\")\n except RuntimeError as e:\n print(e.message)\n\n\nclass RegisterGithub(Subcommand):\n subcommand = 'register-github'\n def __init__(self, parser):\n # conda-smithy register-github ./ --organization=conda-forge\n super(RegisterGithub, self).__init__(parser, \"Register a repo for a feedstock at github.\")\n scp = self.subcommand_parser\n scp.add_argument(\"feedstock_directory\",\n help=\"The directory of the feedstock git repository.\")\n group = scp.add_mutually_exclusive_group()\n group.add_argument(\"--user\", help=\"github username under which to register this repo\")\n group.add_argument(\"--organization\", default=\"conda-forge\",\n help=\"github organisation under which to register this repo\")\n scp.add_argument(\"--remote-name\", default=\"upstream\",\n help=\"The name of the remote to add to the local repo (default: upstream). \"\n \"An empty string will disable adding of a remote.\")\n\n def __call__(self, args):\n from . import github\n try:\n github.create_github_repo(args)\n print(\"\\nRepository registered at github, now call 'conda smithy register-ci'\")\n except RuntimeError as e:\n print(e.message)\n\n\nclass RegisterCI(Subcommand):\n subcommand = 'register-ci'\n def __init__(self, parser):\n # conda-smithy register-ci ./\n super(RegisterCI, self).__init__(parser, \"Register a feedstock at the CI \"\n \"services which do the builds.\")\n scp = self.subcommand_parser\n scp.add_argument(\"--feedstock_directory\", default=os.getcwd(),\n help=\"The directory of the feedstock git repository.\")\n group = scp.add_mutually_exclusive_group()\n group.add_argument(\"--user\", help=\"github username under which to register this repo\")\n group.add_argument(\"--organization\", default=\"conda-forge\",\n help=\"github organisation under which to register this repo\")\n\n def __call__(self, args):\n owner = args.user or args.organization\n repo = os.path.basename(os.path.abspath(args.feedstock_directory))\n\n print('CI Summary for {}/{} (can take ~30s):'.format(owner, repo))\n try:\n ci_register.add_project_to_travis(owner, repo)\n ci_register.travis_token_update_conda_forge_config(args.feedstock_directory, owner, repo)\n time.sleep(1)\n ci_register.travis_configure(owner, repo)\n ci_register.add_project_to_circle(owner, repo)\n ci_register.add_token_to_circle(owner, repo)\n ci_register.add_project_to_appveyor(owner, repo)\n ci_register.appveyor_encrypt_binstar_token(args.feedstock_directory, owner, repo)\n ci_register.appveyor_configure(owner, repo)\n ci_register.add_conda_linting(owner, repo)\n print(\"\\nCI services have been enabled enabled. 
You may wish to regnerate the feedstock.\\n\"\n \"Any changes will need commiting to the repo.\")\n except RuntimeError as e:\n print(e.message)\n\nclass Regenerate(Subcommand):\n subcommand = 'regenerate'\n aliases = ['rerender']\n def __init__(self, parser):\n super(Regenerate, self).__init__(parser, \"Regenerate / update the CI support files of the \"\n \"feedstock.\")\n scp = self.subcommand_parser\n scp.add_argument(\"--feedstock_directory\", default=os.getcwd(),\n help=\"The directory of the feedstock git repository.\")\n\n def __call__(self, args):\n try:\n configure_feedstock.main(args.feedstock_directory)\n print(\"\\nCI support files regenerated. These need to be pushed to github!\")\n except RuntimeError as e:\n print(e.message)\n\n\nclass RecipeLint(Subcommand):\n subcommand = 'recipe-lint'\n def __init__(self, parser):\n super(RecipeLint, self).__init__(parser, \"Lint a single conda recipe.\")\n scp = self.subcommand_parser\n scp.add_argument(\"recipe_directory\", default=[os.getcwd()], nargs='*')\n\n def __call__(self, args):\n all_good = True\n for recipe in args.recipe_directory:\n lint = lint_recipe.main(os.path.join(recipe))\n if lint:\n all_good = False\n print('{} has some lint:\\n {}'.format(recipe, '\\n '.join(lint)))\n else:\n print('{} is in fine form'.format(recipe))\n # Exit code 1 for some lint, 0 for no lint.\n sys.exit(int(not all_good))\n\n\n\ndef main():\n\n parser = argparse.ArgumentParser(\"a tool to help create, administer and manage feedstocks.\")\n subparser = parser.add_subparsers()\n # TODO: Consider allowing plugins/extensions using entry_points.\n # http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html\n for subcommand in Subcommand.__subclasses__():\n subcommand(subparser)\n # And the alias for rerender\n if PY2:\n class Rerender(Regenerate):\n # A poor-man's alias for regenerate.\n subcommand = 'rerender'\n Rerender(subparser)\n\n parser.add_argument('--version', action='version',\n version=__version__,\n help=\"Show conda-smithy's version, and exit.\")\n\n if not sys.argv[1:]:\n args = parser.parse_args(['--help'])\n else:\n args = parser.parse_args()\n\n args.subcommand_func(args)\n\n\nif __name__ == '__main__':\n main()\n", "path": "conda_smithy/cli.py"}], "after_files": [{"content": "from __future__ import print_function, absolute_import\n\nimport os\nimport requests\nimport subprocess\nimport sys\nimport time\nimport argparse\n\nfrom conda_build.metadata import MetaData\n\nfrom . import ci_register\nfrom . import configure_feedstock\nfrom . import lint_recipe\nfrom . 
import __version__\n\n\nPY2 = sys.version_info[0] == 2\n\ndef generate_feedstock_content(target_directory, source_recipe_dir, meta):\n recipe_dir = \"recipe\"\n target_recipe_dir = os.path.join(target_directory, recipe_dir)\n if not os.path.exists(target_recipe_dir):\n os.makedirs(target_recipe_dir)\n # If there is a source recipe, copy it now to the right dir\n if source_recipe_dir:\n configure_feedstock.copytree(source_recipe_dir, target_recipe_dir)\n\n forge_yml = os.path.join(target_directory, 'conda-forge.yml')\n if not os.path.exists(forge_yml):\n with open(forge_yml, 'w') as fh:\n fh.write('[]')\n\n configure_feedstock.main(target_directory)\n\n\ndef init_git_repo(target):\n subprocess.check_call(['git', 'init'], cwd=target)\n\n\ndef create_git_repo(target, msg):\n init_git_repo(target)\n subprocess.check_call(['git', 'add', '*'], cwd=target)\n if sys.platform == \"win32\":\n # prevent this:\n # bash: line 1: ./ci_support/run_docker_build.sh: Permission denied\n # ./ci_support/run_docker_build.sh returned exit code 126\n subprocess.check_call(['git', 'update-index', '--chmod=+x', 'ci_support/run_docker_build.sh'], cwd=target)\n subprocess.check_call(['git', 'commit', '-m', msg], cwd=target)\n\n\nclass Subcommand(object):\n #: The name of the subcommand\n subcommand = None\n aliases = []\n def __init__(self, parser, help=None):\n if PY2:\n # aliases not allowed in 2.7 :-(\n subcommand_parser = parser.add_parser(self.subcommand, help=help)\n else:\n subcommand_parser = parser.add_parser(self.subcommand, help=help, aliases=self.aliases)\n\n subcommand_parser.set_defaults(subcommand_func=self)\n self.subcommand_parser = subcommand_parser\n\n def __call__(self, args):\n pass\n\n\nclass Init(Subcommand):\n subcommand = 'init'\n def __init__(self, parser):\n # conda-smithy init /path/to/udunits-recipe ./\n\n super(Init, self).__init__(parser, \"Create a feedstock git repository, which can contain \"\n \"one conda recipes.\")\n scp = self.subcommand_parser\n scp.add_argument(\"recipe_directory\", help=\"The path to the source recipe directory.\")\n scp.add_argument(\"--feedstock-directory\", default='./{package.name}-feedstock',\n help=\"Target directory, where the new feedstock git repository should be \"\n \"created. (Default: './<packagename>-feedstock')\")\n scp.add_argument(\"--no-git-repo\", action='store_true',\n default=False,\n help=\"Do not init the feedstock as a git repository.\")\n\n def __call__(self, args):\n # check some error conditions\n if args.recipe_directory and not os.path.isdir(args.recipe_directory):\n raise IOError(\"The source recipe directory should be the directory of the \"\n \"conda-recipe you want to build a feedstock for. 
Got {}\".format(\n args.recipe_directory))\n\n # Get some information about the source recipe.\n if args.recipe_directory:\n meta = MetaData(args.recipe_directory)\n else:\n meta = None\n\n feedstock_directory = args.feedstock_directory.format(package=argparse.Namespace(name=meta.name()))\n msg = 'Initial commit of the {} feedstock.'.format(meta.name())\n\n try:\n generate_feedstock_content(feedstock_directory, args.recipe_directory, meta)\n if not args.no_git_repo:\n create_git_repo(feedstock_directory, msg)\n\n print(\"\\nRepository created, please edit conda-forge.yml to configure the upload channels\\n\"\n \"and afterwards call 'conda smithy register-github'\")\n except RuntimeError as e:\n print(e)\n\n\nclass RegisterGithub(Subcommand):\n subcommand = 'register-github'\n def __init__(self, parser):\n # conda-smithy register-github ./ --organization=conda-forge\n super(RegisterGithub, self).__init__(parser, \"Register a repo for a feedstock at github.\")\n scp = self.subcommand_parser\n scp.add_argument(\"feedstock_directory\",\n help=\"The directory of the feedstock git repository.\")\n group = scp.add_mutually_exclusive_group()\n group.add_argument(\"--user\", help=\"github username under which to register this repo\")\n group.add_argument(\"--organization\", default=\"conda-forge\",\n help=\"github organisation under which to register this repo\")\n scp.add_argument(\"--remote-name\", default=\"upstream\",\n help=\"The name of the remote to add to the local repo (default: upstream). \"\n \"An empty string will disable adding of a remote.\")\n\n def __call__(self, args):\n from . import github\n try:\n github.create_github_repo(args)\n print(\"\\nRepository registered at github, now call 'conda smithy register-ci'\")\n except RuntimeError as e:\n print(e)\n\n\nclass RegisterCI(Subcommand):\n subcommand = 'register-ci'\n def __init__(self, parser):\n # conda-smithy register-ci ./\n super(RegisterCI, self).__init__(parser, \"Register a feedstock at the CI \"\n \"services which do the builds.\")\n scp = self.subcommand_parser\n scp.add_argument(\"--feedstock_directory\", default=os.getcwd(),\n help=\"The directory of the feedstock git repository.\")\n group = scp.add_mutually_exclusive_group()\n group.add_argument(\"--user\", help=\"github username under which to register this repo\")\n group.add_argument(\"--organization\", default=\"conda-forge\",\n help=\"github organisation under which to register this repo\")\n\n def __call__(self, args):\n owner = args.user or args.organization\n repo = os.path.basename(os.path.abspath(args.feedstock_directory))\n\n print('CI Summary for {}/{} (can take ~30s):'.format(owner, repo))\n try:\n ci_register.add_project_to_travis(owner, repo)\n ci_register.travis_token_update_conda_forge_config(args.feedstock_directory, owner, repo)\n time.sleep(1)\n ci_register.travis_configure(owner, repo)\n ci_register.add_project_to_circle(owner, repo)\n ci_register.add_token_to_circle(owner, repo)\n ci_register.add_project_to_appveyor(owner, repo)\n ci_register.appveyor_encrypt_binstar_token(args.feedstock_directory, owner, repo)\n ci_register.appveyor_configure(owner, repo)\n ci_register.add_conda_linting(owner, repo)\n print(\"\\nCI services have been enabled enabled. 
You may wish to regnerate the feedstock.\\n\"\n \"Any changes will need commiting to the repo.\")\n except RuntimeError as e:\n print(e)\n\nclass Regenerate(Subcommand):\n subcommand = 'regenerate'\n aliases = ['rerender']\n def __init__(self, parser):\n super(Regenerate, self).__init__(parser, \"Regenerate / update the CI support files of the \"\n \"feedstock.\")\n scp = self.subcommand_parser\n scp.add_argument(\"--feedstock_directory\", default=os.getcwd(),\n help=\"The directory of the feedstock git repository.\")\n\n def __call__(self, args):\n try:\n configure_feedstock.main(args.feedstock_directory)\n print(\"\\nCI support files regenerated. These need to be pushed to github!\")\n except RuntimeError as e:\n print(e)\n\n\nclass RecipeLint(Subcommand):\n subcommand = 'recipe-lint'\n def __init__(self, parser):\n super(RecipeLint, self).__init__(parser, \"Lint a single conda recipe.\")\n scp = self.subcommand_parser\n scp.add_argument(\"recipe_directory\", default=[os.getcwd()], nargs='*')\n\n def __call__(self, args):\n all_good = True\n for recipe in args.recipe_directory:\n lint = lint_recipe.main(os.path.join(recipe))\n if lint:\n all_good = False\n print('{} has some lint:\\n {}'.format(recipe, '\\n '.join(lint)))\n else:\n print('{} is in fine form'.format(recipe))\n # Exit code 1 for some lint, 0 for no lint.\n sys.exit(int(not all_good))\n\n\n\ndef main():\n\n parser = argparse.ArgumentParser(\"a tool to help create, administer and manage feedstocks.\")\n subparser = parser.add_subparsers()\n # TODO: Consider allowing plugins/extensions using entry_points.\n # http://reinout.vanrees.org/weblog/2010/01/06/zest-releaser-entry-points.html\n for subcommand in Subcommand.__subclasses__():\n subcommand(subparser)\n # And the alias for rerender\n if PY2:\n class Rerender(Regenerate):\n # A poor-man's alias for regenerate.\n subcommand = 'rerender'\n Rerender(subparser)\n\n parser.add_argument('--version', action='version',\n version=__version__,\n help=\"Show conda-smithy's version, and exit.\")\n\n if not sys.argv[1:]:\n args = parser.parse_args(['--help'])\n else:\n args = parser.parse_args()\n\n args.subcommand_func(args)\n\n\nif __name__ == '__main__':\n main()\n", "path": "conda_smithy/cli.py"}]} | 3,074 | 323 |
gh_patches_debug_29315 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-2572 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a warning when using `@strawberry.federation.type` but not using `strawberry.federation.Schema`
This should make developer life easier if they forget to use the federation schema :)
See: https://discord.com/channels/689806334337482765/773519351423827978/950480727630303232
--- END ISSUE ---
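For context, a sketch of the misuse the requested warning should catch (hypothetical `Product`/`Query` types, not code from the repository): `strawberry.federation.Schema` is what adds the `_service` and `_entities` entry points, so a plain `strawberry.Schema` builds without complaint but is not a usable subgraph.
```python
import strawberry


@strawberry.federation.type(keys=["id"])
class Product:
    id: strawberry.ID


@strawberry.type
class Query:
    @strawberry.field
    def product(self) -> Product:
        return Product(id=strawberry.ID("1"))


# Builds silently today; the issue asks for a warning here pointing the
# developer at strawberry.federation.Schema instead.
schema = strawberry.Schema(query=Query)
```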
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/schema/schema.py`
Content:
```
1 from __future__ import annotations
2
3 from functools import lru_cache
4 from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union, cast
5
6 from graphql import (
7 GraphQLNamedType,
8 GraphQLNonNull,
9 GraphQLSchema,
10 get_introspection_query,
11 parse,
12 validate_schema,
13 )
14 from graphql.subscription import subscribe
15 from graphql.type.directives import specified_directives
16
17 from strawberry.annotation import StrawberryAnnotation
18 from strawberry.extensions.directives import (
19 DirectivesExtension,
20 DirectivesExtensionSync,
21 )
22 from strawberry.schema.schema_converter import GraphQLCoreConverter
23 from strawberry.schema.types.scalar import DEFAULT_SCALAR_REGISTRY
24 from strawberry.types import ExecutionContext
25 from strawberry.types.graphql import OperationType
26 from strawberry.types.types import TypeDefinition
27
28 from ..printer import print_schema
29 from . import compat
30 from .base import BaseSchema
31 from .config import StrawberryConfig
32 from .execute import execute, execute_sync
33
34 if TYPE_CHECKING:
35 from graphql import ExecutionContext as GraphQLExecutionContext
36
37 from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper
38 from strawberry.directive import StrawberryDirective
39 from strawberry.enum import EnumDefinition
40 from strawberry.extensions import SchemaExtension
41 from strawberry.field import StrawberryField
42 from strawberry.types import ExecutionResult
43 from strawberry.union import StrawberryUnion
44
45 DEFAULT_ALLOWED_OPERATION_TYPES = {
46 OperationType.QUERY,
47 OperationType.MUTATION,
48 OperationType.SUBSCRIPTION,
49 }
50
51
52 class Schema(BaseSchema):
53 def __init__(
54 self,
55 # TODO: can we make sure we only allow to pass
56 # something that has been decorated?
57 query: Type,
58 mutation: Optional[Type] = None,
59 subscription: Optional[Type] = None,
60 directives: Iterable[StrawberryDirective] = (),
61 types=(),
62 extensions: Iterable[Union[Type[SchemaExtension], SchemaExtension]] = (),
63 execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,
64 config: Optional[StrawberryConfig] = None,
65 scalar_overrides: Optional[
66 Dict[object, Union[Type, ScalarWrapper, ScalarDefinition]]
67 ] = None,
68 schema_directives: Iterable[object] = (),
69 ):
70 self.query = query
71 self.mutation = mutation
72 self.subscription = subscription
73
74 self.extensions = extensions
75 self.execution_context_class = execution_context_class
76 self.config = config or StrawberryConfig()
77
78 SCALAR_OVERRIDES_DICT_TYPE = Dict[
79 object, Union["ScalarWrapper", "ScalarDefinition"]
80 ]
81
82 scalar_registry: SCALAR_OVERRIDES_DICT_TYPE = {**DEFAULT_SCALAR_REGISTRY}
83 if scalar_overrides:
84 # TODO: check that the overrides are valid
85 scalar_registry.update(cast(SCALAR_OVERRIDES_DICT_TYPE, scalar_overrides))
86
87 self.schema_converter = GraphQLCoreConverter(self.config, scalar_registry)
88 self.directives = directives
89 self.schema_directives = list(schema_directives)
90
91 query_type = self.schema_converter.from_object(query._type_definition)
92 mutation_type = (
93 self.schema_converter.from_object(mutation._type_definition)
94 if mutation
95 else None
96 )
97 subscription_type = (
98 self.schema_converter.from_object(subscription._type_definition)
99 if subscription
100 else None
101 )
102
103 graphql_directives = [
104 self.schema_converter.from_directive(directive) for directive in directives
105 ]
106
107 graphql_types = []
108 for type_ in types:
109 if compat.is_schema_directive(type_):
110 graphql_directives.append(
111 self.schema_converter.from_schema_directive(type_)
112 )
113 else:
114 if hasattr(type_, "_type_definition"):
115 if type_._type_definition.is_generic:
116 type_ = StrawberryAnnotation(type_).resolve()
117 graphql_type = self.schema_converter.from_maybe_optional(type_)
118 if isinstance(graphql_type, GraphQLNonNull):
119 graphql_type = graphql_type.of_type
120 if not isinstance(graphql_type, GraphQLNamedType):
121 raise TypeError(f"{graphql_type} is not a named GraphQL Type")
122 graphql_types.append(graphql_type)
123
124 try:
125 self._schema = GraphQLSchema(
126 query=query_type,
127 mutation=mutation_type,
128 subscription=subscription_type if subscription else None,
129 directives=specified_directives + tuple(graphql_directives),
130 types=graphql_types,
131 extensions={
132 GraphQLCoreConverter.DEFINITION_BACKREF: self,
133 },
134 )
135
136 except TypeError as error:
137 # GraphQL core throws a TypeError if there's any exception raised
138 # during the schema creation, so we check if the cause was a
139 # StrawberryError and raise it instead if that's the case.
140
141 from strawberry.exceptions import StrawberryException
142
143 if isinstance(error.__cause__, StrawberryException):
144 raise error.__cause__ from None
145
146 raise
147
148 # attach our schema to the GraphQL schema instance
149 self._schema._strawberry_schema = self # type: ignore
150
151 # Validate schema early because we want developers to know about
152 # possible issues as soon as possible
153 errors = validate_schema(self._schema)
154 if errors:
155 formatted_errors = "\n\n".join(f"❌ {error.message}" for error in errors)
156 raise ValueError(f"Invalid Schema. Errors:\n\n{formatted_errors}")
157
158 def get_extensions(
159 self, sync: bool = False
160 ) -> List[Union[Type[SchemaExtension], SchemaExtension]]:
161 extensions = list(self.extensions)
162
163 if self.directives:
164 extensions.append(DirectivesExtensionSync if sync else DirectivesExtension)
165
166 return extensions
167
168 @lru_cache()
169 def get_type_by_name(
170 self, name: str
171 ) -> Optional[
172 Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]
173 ]:
174 # TODO: respect auto_camel_case
175 if name in self.schema_converter.type_map:
176 return self.schema_converter.type_map[name].definition
177
178 return None
179
180 def get_field_for_type(
181 self, field_name: str, type_name: str
182 ) -> Optional[StrawberryField]:
183 type_ = self.get_type_by_name(type_name)
184
185 if not type_:
186 return None # pragma: no cover
187
188 assert isinstance(type_, TypeDefinition)
189
190 return next(
191 (
192 field
193 for field in type_.fields
194 if self.config.name_converter.get_graphql_name(field) == field_name
195 ),
196 None,
197 )
198
199 @lru_cache()
200 def get_directive_by_name(self, graphql_name: str) -> Optional[StrawberryDirective]:
201 return next(
202 (
203 directive
204 for directive in self.directives
205 if self.config.name_converter.from_directive(directive) == graphql_name
206 ),
207 None,
208 )
209
210 async def execute(
211 self,
212 query: Optional[str],
213 variable_values: Optional[Dict[str, Any]] = None,
214 context_value: Optional[Any] = None,
215 root_value: Optional[Any] = None,
216 operation_name: Optional[str] = None,
217 allowed_operation_types: Optional[Iterable[OperationType]] = None,
218 ) -> ExecutionResult:
219 if allowed_operation_types is None:
220 allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES
221
222 # Create execution context
223 execution_context = ExecutionContext(
224 query=query,
225 schema=self,
226 context=context_value,
227 root_value=root_value,
228 variables=variable_values,
229 provided_operation_name=operation_name,
230 )
231
232 result = await execute(
233 self._schema,
234 extensions=self.get_extensions(),
235 execution_context_class=self.execution_context_class,
236 execution_context=execution_context,
237 allowed_operation_types=allowed_operation_types,
238 process_errors=self.process_errors,
239 )
240
241 return result
242
243 def execute_sync(
244 self,
245 query: Optional[str],
246 variable_values: Optional[Dict[str, Any]] = None,
247 context_value: Optional[Any] = None,
248 root_value: Optional[Any] = None,
249 operation_name: Optional[str] = None,
250 allowed_operation_types: Optional[Iterable[OperationType]] = None,
251 ) -> ExecutionResult:
252 if allowed_operation_types is None:
253 allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES
254
255 execution_context = ExecutionContext(
256 query=query,
257 schema=self,
258 context=context_value,
259 root_value=root_value,
260 variables=variable_values,
261 provided_operation_name=operation_name,
262 )
263
264 result = execute_sync(
265 self._schema,
266 extensions=self.get_extensions(sync=True),
267 execution_context_class=self.execution_context_class,
268 execution_context=execution_context,
269 allowed_operation_types=allowed_operation_types,
270 process_errors=self.process_errors,
271 )
272
273 return result
274
275 async def subscribe(
276 self,
277 # TODO: make this optional when we support extensions
278 query: str,
279 variable_values: Optional[Dict[str, Any]] = None,
280 context_value: Optional[Any] = None,
281 root_value: Optional[Any] = None,
282 operation_name: Optional[str] = None,
283 ):
284 return await subscribe(
285 self._schema,
286 parse(query),
287 root_value=root_value,
288 context_value=context_value,
289 variable_values=variable_values,
290 operation_name=operation_name,
291 )
292
293 def as_str(self) -> str:
294 return print_schema(self)
295
296 __str__ = as_str
297
298 def introspect(self) -> Dict[str, Any]:
299 """Return the introspection query result for the current schema
300
301 Raises:
302 ValueError: If the introspection query fails due to an invalid schema
303 """
304 introspection = self.execute_sync(get_introspection_query())
305 if introspection.errors or not introspection.data:
306 raise ValueError(f"Invalid Schema. Errors {introspection.errors!r}")
307
308 return introspection.data
309
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py
--- a/strawberry/schema/schema.py
+++ b/strawberry/schema/schema.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+import warnings
from functools import lru_cache
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union, cast
@@ -148,6 +149,8 @@
# attach our schema to the GraphQL schema instance
self._schema._strawberry_schema = self # type: ignore
+ self._warn_for_federation_directives()
+
# Validate schema early because we want developers to know about
# possible issues as soon as possible
errors = validate_schema(self._schema)
@@ -290,6 +293,25 @@
operation_name=operation_name,
)
+ def _warn_for_federation_directives(self):
+ """Raises a warning if the schema has any federation directives."""
+ from strawberry.federation.schema_directives import FederationDirective
+
+ if any(
+ type_
+ for type_ in self.schema_converter.type_map.values()
+ if any(
+ directive
+ for directive in (type_.definition.directives or [])
+ if isinstance(directive, FederationDirective)
+ )
+ ):
+ warnings.warn(
+ "Federation directive found in schema. "
+ "Should use strawberry.federation.Schema instead.",
+ UserWarning,
+ )
+
def as_str(self) -> str:
return print_schema(self)
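
For context only (a sketch, not part of the record): with the patch above applied, building a plain `strawberry.Schema` over a type that carries a federation directive should emit the new `UserWarning`. The `Product` and `Query` types below are hypothetical and only serve to exercise that path.

```python
# Sketch: exercises the UserWarning introduced in the diff above.
# `Product` and `Query` are illustrative types, not taken from the repository.
import warnings

import strawberry


@strawberry.federation.type(keys=["id"])
class Product:
    id: strawberry.ID


@strawberry.type
class Query:
    product: Product


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Plain Schema instead of strawberry.federation.Schema -> should warn.
    strawberry.Schema(query=Query)

assert any("strawberry.federation.Schema" in str(w.message) for w in caught)
```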
| {"golden_diff": "diff --git a/strawberry/schema/schema.py b/strawberry/schema/schema.py\n--- a/strawberry/schema/schema.py\n+++ b/strawberry/schema/schema.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+import warnings\n from functools import lru_cache\n from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union, cast\n \n@@ -148,6 +149,8 @@\n # attach our schema to the GraphQL schema instance\n self._schema._strawberry_schema = self # type: ignore\n \n+ self._warn_for_federation_directives()\n+\n # Validate schema early because we want developers to know about\n # possible issues as soon as possible\n errors = validate_schema(self._schema)\n@@ -290,6 +293,25 @@\n operation_name=operation_name,\n )\n \n+ def _warn_for_federation_directives(self):\n+ \"\"\"Raises a warning if the schema has any federation directives.\"\"\"\n+ from strawberry.federation.schema_directives import FederationDirective\n+\n+ if any(\n+ type_\n+ for type_ in self.schema_converter.type_map.values()\n+ if any(\n+ directive\n+ for directive in (type_.definition.directives or [])\n+ if isinstance(directive, FederationDirective)\n+ )\n+ ):\n+ warnings.warn(\n+ \"Federation directive found in schema. \"\n+ \"Should use strawberry.federation.Schema instead.\",\n+ UserWarning,\n+ )\n+\n def as_str(self) -> str:\n return print_schema(self)\n", "issue": "Add a warning when using `@strawberry.federation.type` but not using `strawberry.federation.Schema`\nThis should make developer life easier if they forget to use the federation schema :)\r\n\r\nSee: https://discord.com/channels/689806334337482765/773519351423827978/950480727630303232\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union, cast\n\nfrom graphql import (\n GraphQLNamedType,\n GraphQLNonNull,\n GraphQLSchema,\n get_introspection_query,\n parse,\n validate_schema,\n)\nfrom graphql.subscription import subscribe\nfrom graphql.type.directives import specified_directives\n\nfrom strawberry.annotation import StrawberryAnnotation\nfrom strawberry.extensions.directives import (\n DirectivesExtension,\n DirectivesExtensionSync,\n)\nfrom strawberry.schema.schema_converter import GraphQLCoreConverter\nfrom strawberry.schema.types.scalar import DEFAULT_SCALAR_REGISTRY\nfrom strawberry.types import ExecutionContext\nfrom strawberry.types.graphql import OperationType\nfrom strawberry.types.types import TypeDefinition\n\nfrom ..printer import print_schema\nfrom . 
import compat\nfrom .base import BaseSchema\nfrom .config import StrawberryConfig\nfrom .execute import execute, execute_sync\n\nif TYPE_CHECKING:\n from graphql import ExecutionContext as GraphQLExecutionContext\n\n from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper\n from strawberry.directive import StrawberryDirective\n from strawberry.enum import EnumDefinition\n from strawberry.extensions import SchemaExtension\n from strawberry.field import StrawberryField\n from strawberry.types import ExecutionResult\n from strawberry.union import StrawberryUnion\n\nDEFAULT_ALLOWED_OPERATION_TYPES = {\n OperationType.QUERY,\n OperationType.MUTATION,\n OperationType.SUBSCRIPTION,\n}\n\n\nclass Schema(BaseSchema):\n def __init__(\n self,\n # TODO: can we make sure we only allow to pass\n # something that has been decorated?\n query: Type,\n mutation: Optional[Type] = None,\n subscription: Optional[Type] = None,\n directives: Iterable[StrawberryDirective] = (),\n types=(),\n extensions: Iterable[Union[Type[SchemaExtension], SchemaExtension]] = (),\n execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,\n config: Optional[StrawberryConfig] = None,\n scalar_overrides: Optional[\n Dict[object, Union[Type, ScalarWrapper, ScalarDefinition]]\n ] = None,\n schema_directives: Iterable[object] = (),\n ):\n self.query = query\n self.mutation = mutation\n self.subscription = subscription\n\n self.extensions = extensions\n self.execution_context_class = execution_context_class\n self.config = config or StrawberryConfig()\n\n SCALAR_OVERRIDES_DICT_TYPE = Dict[\n object, Union[\"ScalarWrapper\", \"ScalarDefinition\"]\n ]\n\n scalar_registry: SCALAR_OVERRIDES_DICT_TYPE = {**DEFAULT_SCALAR_REGISTRY}\n if scalar_overrides:\n # TODO: check that the overrides are valid\n scalar_registry.update(cast(SCALAR_OVERRIDES_DICT_TYPE, scalar_overrides))\n\n self.schema_converter = GraphQLCoreConverter(self.config, scalar_registry)\n self.directives = directives\n self.schema_directives = list(schema_directives)\n\n query_type = self.schema_converter.from_object(query._type_definition)\n mutation_type = (\n self.schema_converter.from_object(mutation._type_definition)\n if mutation\n else None\n )\n subscription_type = (\n self.schema_converter.from_object(subscription._type_definition)\n if subscription\n else None\n )\n\n graphql_directives = [\n self.schema_converter.from_directive(directive) for directive in directives\n ]\n\n graphql_types = []\n for type_ in types:\n if compat.is_schema_directive(type_):\n graphql_directives.append(\n self.schema_converter.from_schema_directive(type_)\n )\n else:\n if hasattr(type_, \"_type_definition\"):\n if type_._type_definition.is_generic:\n type_ = StrawberryAnnotation(type_).resolve()\n graphql_type = self.schema_converter.from_maybe_optional(type_)\n if isinstance(graphql_type, GraphQLNonNull):\n graphql_type = graphql_type.of_type\n if not isinstance(graphql_type, GraphQLNamedType):\n raise TypeError(f\"{graphql_type} is not a named GraphQL Type\")\n graphql_types.append(graphql_type)\n\n try:\n self._schema = GraphQLSchema(\n query=query_type,\n mutation=mutation_type,\n subscription=subscription_type if subscription else None,\n directives=specified_directives + tuple(graphql_directives),\n types=graphql_types,\n extensions={\n GraphQLCoreConverter.DEFINITION_BACKREF: self,\n },\n )\n\n except TypeError as error:\n # GraphQL core throws a TypeError if there's any exception raised\n # during the schema creation, so we check if the cause was a\n 
# StrawberryError and raise it instead if that's the case.\n\n from strawberry.exceptions import StrawberryException\n\n if isinstance(error.__cause__, StrawberryException):\n raise error.__cause__ from None\n\n raise\n\n # attach our schema to the GraphQL schema instance\n self._schema._strawberry_schema = self # type: ignore\n\n # Validate schema early because we want developers to know about\n # possible issues as soon as possible\n errors = validate_schema(self._schema)\n if errors:\n formatted_errors = \"\\n\\n\".join(f\"\u274c {error.message}\" for error in errors)\n raise ValueError(f\"Invalid Schema. Errors:\\n\\n{formatted_errors}\")\n\n def get_extensions(\n self, sync: bool = False\n ) -> List[Union[Type[SchemaExtension], SchemaExtension]]:\n extensions = list(self.extensions)\n\n if self.directives:\n extensions.append(DirectivesExtensionSync if sync else DirectivesExtension)\n\n return extensions\n\n @lru_cache()\n def get_type_by_name(\n self, name: str\n ) -> Optional[\n Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]\n ]:\n # TODO: respect auto_camel_case\n if name in self.schema_converter.type_map:\n return self.schema_converter.type_map[name].definition\n\n return None\n\n def get_field_for_type(\n self, field_name: str, type_name: str\n ) -> Optional[StrawberryField]:\n type_ = self.get_type_by_name(type_name)\n\n if not type_:\n return None # pragma: no cover\n\n assert isinstance(type_, TypeDefinition)\n\n return next(\n (\n field\n for field in type_.fields\n if self.config.name_converter.get_graphql_name(field) == field_name\n ),\n None,\n )\n\n @lru_cache()\n def get_directive_by_name(self, graphql_name: str) -> Optional[StrawberryDirective]:\n return next(\n (\n directive\n for directive in self.directives\n if self.config.name_converter.from_directive(directive) == graphql_name\n ),\n None,\n )\n\n async def execute(\n self,\n query: Optional[str],\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n allowed_operation_types: Optional[Iterable[OperationType]] = None,\n ) -> ExecutionResult:\n if allowed_operation_types is None:\n allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES\n\n # Create execution context\n execution_context = ExecutionContext(\n query=query,\n schema=self,\n context=context_value,\n root_value=root_value,\n variables=variable_values,\n provided_operation_name=operation_name,\n )\n\n result = await execute(\n self._schema,\n extensions=self.get_extensions(),\n execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n process_errors=self.process_errors,\n )\n\n return result\n\n def execute_sync(\n self,\n query: Optional[str],\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n allowed_operation_types: Optional[Iterable[OperationType]] = None,\n ) -> ExecutionResult:\n if allowed_operation_types is None:\n allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES\n\n execution_context = ExecutionContext(\n query=query,\n schema=self,\n context=context_value,\n root_value=root_value,\n variables=variable_values,\n provided_operation_name=operation_name,\n )\n\n result = execute_sync(\n self._schema,\n extensions=self.get_extensions(sync=True),\n 
execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n process_errors=self.process_errors,\n )\n\n return result\n\n async def subscribe(\n self,\n # TODO: make this optional when we support extensions\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ):\n return await subscribe(\n self._schema,\n parse(query),\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n\n def as_str(self) -> str:\n return print_schema(self)\n\n __str__ = as_str\n\n def introspect(self) -> Dict[str, Any]:\n \"\"\"Return the introspection query result for the current schema\n\n Raises:\n ValueError: If the introspection query fails due to an invalid schema\n \"\"\"\n introspection = self.execute_sync(get_introspection_query())\n if introspection.errors or not introspection.data:\n raise ValueError(f\"Invalid Schema. Errors {introspection.errors!r}\")\n\n return introspection.data\n", "path": "strawberry/schema/schema.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport warnings\nfrom functools import lru_cache\nfrom typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Type, Union, cast\n\nfrom graphql import (\n GraphQLNamedType,\n GraphQLNonNull,\n GraphQLSchema,\n get_introspection_query,\n parse,\n validate_schema,\n)\nfrom graphql.subscription import subscribe\nfrom graphql.type.directives import specified_directives\n\nfrom strawberry.annotation import StrawberryAnnotation\nfrom strawberry.extensions.directives import (\n DirectivesExtension,\n DirectivesExtensionSync,\n)\nfrom strawberry.schema.schema_converter import GraphQLCoreConverter\nfrom strawberry.schema.types.scalar import DEFAULT_SCALAR_REGISTRY\nfrom strawberry.types import ExecutionContext\nfrom strawberry.types.graphql import OperationType\nfrom strawberry.types.types import TypeDefinition\n\nfrom ..printer import print_schema\nfrom . 
import compat\nfrom .base import BaseSchema\nfrom .config import StrawberryConfig\nfrom .execute import execute, execute_sync\n\nif TYPE_CHECKING:\n from graphql import ExecutionContext as GraphQLExecutionContext\n\n from strawberry.custom_scalar import ScalarDefinition, ScalarWrapper\n from strawberry.directive import StrawberryDirective\n from strawberry.enum import EnumDefinition\n from strawberry.extensions import SchemaExtension\n from strawberry.field import StrawberryField\n from strawberry.types import ExecutionResult\n from strawberry.union import StrawberryUnion\n\nDEFAULT_ALLOWED_OPERATION_TYPES = {\n OperationType.QUERY,\n OperationType.MUTATION,\n OperationType.SUBSCRIPTION,\n}\n\n\nclass Schema(BaseSchema):\n def __init__(\n self,\n # TODO: can we make sure we only allow to pass\n # something that has been decorated?\n query: Type,\n mutation: Optional[Type] = None,\n subscription: Optional[Type] = None,\n directives: Iterable[StrawberryDirective] = (),\n types=(),\n extensions: Iterable[Union[Type[SchemaExtension], SchemaExtension]] = (),\n execution_context_class: Optional[Type[GraphQLExecutionContext]] = None,\n config: Optional[StrawberryConfig] = None,\n scalar_overrides: Optional[\n Dict[object, Union[Type, ScalarWrapper, ScalarDefinition]]\n ] = None,\n schema_directives: Iterable[object] = (),\n ):\n self.query = query\n self.mutation = mutation\n self.subscription = subscription\n\n self.extensions = extensions\n self.execution_context_class = execution_context_class\n self.config = config or StrawberryConfig()\n\n SCALAR_OVERRIDES_DICT_TYPE = Dict[\n object, Union[\"ScalarWrapper\", \"ScalarDefinition\"]\n ]\n\n scalar_registry: SCALAR_OVERRIDES_DICT_TYPE = {**DEFAULT_SCALAR_REGISTRY}\n if scalar_overrides:\n # TODO: check that the overrides are valid\n scalar_registry.update(cast(SCALAR_OVERRIDES_DICT_TYPE, scalar_overrides))\n\n self.schema_converter = GraphQLCoreConverter(self.config, scalar_registry)\n self.directives = directives\n self.schema_directives = list(schema_directives)\n\n query_type = self.schema_converter.from_object(query._type_definition)\n mutation_type = (\n self.schema_converter.from_object(mutation._type_definition)\n if mutation\n else None\n )\n subscription_type = (\n self.schema_converter.from_object(subscription._type_definition)\n if subscription\n else None\n )\n\n graphql_directives = [\n self.schema_converter.from_directive(directive) for directive in directives\n ]\n\n graphql_types = []\n for type_ in types:\n if compat.is_schema_directive(type_):\n graphql_directives.append(\n self.schema_converter.from_schema_directive(type_)\n )\n else:\n if hasattr(type_, \"_type_definition\"):\n if type_._type_definition.is_generic:\n type_ = StrawberryAnnotation(type_).resolve()\n graphql_type = self.schema_converter.from_maybe_optional(type_)\n if isinstance(graphql_type, GraphQLNonNull):\n graphql_type = graphql_type.of_type\n if not isinstance(graphql_type, GraphQLNamedType):\n raise TypeError(f\"{graphql_type} is not a named GraphQL Type\")\n graphql_types.append(graphql_type)\n\n try:\n self._schema = GraphQLSchema(\n query=query_type,\n mutation=mutation_type,\n subscription=subscription_type if subscription else None,\n directives=specified_directives + tuple(graphql_directives),\n types=graphql_types,\n extensions={\n GraphQLCoreConverter.DEFINITION_BACKREF: self,\n },\n )\n\n except TypeError as error:\n # GraphQL core throws a TypeError if there's any exception raised\n # during the schema creation, so we check if the cause was a\n 
# StrawberryError and raise it instead if that's the case.\n\n from strawberry.exceptions import StrawberryException\n\n if isinstance(error.__cause__, StrawberryException):\n raise error.__cause__ from None\n\n raise\n\n # attach our schema to the GraphQL schema instance\n self._schema._strawberry_schema = self # type: ignore\n\n self._warn_for_federation_directives()\n\n # Validate schema early because we want developers to know about\n # possible issues as soon as possible\n errors = validate_schema(self._schema)\n if errors:\n formatted_errors = \"\\n\\n\".join(f\"\u274c {error.message}\" for error in errors)\n raise ValueError(f\"Invalid Schema. Errors:\\n\\n{formatted_errors}\")\n\n def get_extensions(\n self, sync: bool = False\n ) -> List[Union[Type[SchemaExtension], SchemaExtension]]:\n extensions = list(self.extensions)\n\n if self.directives:\n extensions.append(DirectivesExtensionSync if sync else DirectivesExtension)\n\n return extensions\n\n @lru_cache()\n def get_type_by_name(\n self, name: str\n ) -> Optional[\n Union[TypeDefinition, ScalarDefinition, EnumDefinition, StrawberryUnion]\n ]:\n # TODO: respect auto_camel_case\n if name in self.schema_converter.type_map:\n return self.schema_converter.type_map[name].definition\n\n return None\n\n def get_field_for_type(\n self, field_name: str, type_name: str\n ) -> Optional[StrawberryField]:\n type_ = self.get_type_by_name(type_name)\n\n if not type_:\n return None # pragma: no cover\n\n assert isinstance(type_, TypeDefinition)\n\n return next(\n (\n field\n for field in type_.fields\n if self.config.name_converter.get_graphql_name(field) == field_name\n ),\n None,\n )\n\n @lru_cache()\n def get_directive_by_name(self, graphql_name: str) -> Optional[StrawberryDirective]:\n return next(\n (\n directive\n for directive in self.directives\n if self.config.name_converter.from_directive(directive) == graphql_name\n ),\n None,\n )\n\n async def execute(\n self,\n query: Optional[str],\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n allowed_operation_types: Optional[Iterable[OperationType]] = None,\n ) -> ExecutionResult:\n if allowed_operation_types is None:\n allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES\n\n # Create execution context\n execution_context = ExecutionContext(\n query=query,\n schema=self,\n context=context_value,\n root_value=root_value,\n variables=variable_values,\n provided_operation_name=operation_name,\n )\n\n result = await execute(\n self._schema,\n extensions=self.get_extensions(),\n execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n process_errors=self.process_errors,\n )\n\n return result\n\n def execute_sync(\n self,\n query: Optional[str],\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n allowed_operation_types: Optional[Iterable[OperationType]] = None,\n ) -> ExecutionResult:\n if allowed_operation_types is None:\n allowed_operation_types = DEFAULT_ALLOWED_OPERATION_TYPES\n\n execution_context = ExecutionContext(\n query=query,\n schema=self,\n context=context_value,\n root_value=root_value,\n variables=variable_values,\n provided_operation_name=operation_name,\n )\n\n result = execute_sync(\n self._schema,\n extensions=self.get_extensions(sync=True),\n 
execution_context_class=self.execution_context_class,\n execution_context=execution_context,\n allowed_operation_types=allowed_operation_types,\n process_errors=self.process_errors,\n )\n\n return result\n\n async def subscribe(\n self,\n # TODO: make this optional when we support extensions\n query: str,\n variable_values: Optional[Dict[str, Any]] = None,\n context_value: Optional[Any] = None,\n root_value: Optional[Any] = None,\n operation_name: Optional[str] = None,\n ):\n return await subscribe(\n self._schema,\n parse(query),\n root_value=root_value,\n context_value=context_value,\n variable_values=variable_values,\n operation_name=operation_name,\n )\n\n def _warn_for_federation_directives(self):\n \"\"\"Raises a warning if the schema has any federation directives.\"\"\"\n from strawberry.federation.schema_directives import FederationDirective\n\n if any(\n type_\n for type_ in self.schema_converter.type_map.values()\n if any(\n directive\n for directive in (type_.definition.directives or [])\n if isinstance(directive, FederationDirective)\n )\n ):\n warnings.warn(\n \"Federation directive found in schema. \"\n \"Should use strawberry.federation.Schema instead.\",\n UserWarning,\n )\n\n def as_str(self) -> str:\n return print_schema(self)\n\n __str__ = as_str\n\n def introspect(self) -> Dict[str, Any]:\n \"\"\"Return the introspection query result for the current schema\n\n Raises:\n ValueError: If the introspection query fails due to an invalid schema\n \"\"\"\n introspection = self.execute_sync(get_introspection_query())\n if introspection.errors or not introspection.data:\n raise ValueError(f\"Invalid Schema. Errors {introspection.errors!r}\")\n\n return introspection.data\n", "path": "strawberry/schema/schema.py"}]} | 3,256 | 353 |
gh_patches_debug_21795 | rasdani/github-patches | git_diff | zulip__zulip-19703 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Indicate latest Zulip server version on ReadTheDocs installation and upgrade pages
At present, the Zulip installation and upgrade docs don't tell you what version of Zulip you'll be installing. While the installation/upgrade script doesn't require the user to know this, stating the version would provide helpful context for someone who wants to understand what they are about to install.
We should add a line to each of the following pages that indicates the version of Zulip server that will be installed. This information should be updated automatically when a new version is released.
Pages to change:
* https://zulip.readthedocs.io/en/stable/production/upgrade-or-modify.html
* https://zulip.readthedocs.io/en/stable/production/install.html
--- END ISSUE ---
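
As an illustration of one way to meet this requirement (a sketch only): expose the release number to MyST as a substitution in `docs/conf.py` and reference it from the affected pages. It assumes the repository root is already on `sys.path` (as this `conf.py` arranges) and that `version.py` defines `LATEST_RELEASE_VERSION`, the same name the golden diff below relies on.

```python
# docs/conf.py -- sketch: make the latest release available as a MyST substitution.
from version import LATEST_RELEASE_VERSION  # assumed to be defined in version.py

myst_enable_extensions = [
    "colon_fence",
    "substitution",  # enables {{ ... }} substitutions in Markdown sources
]

myst_substitutions = {
    "LATEST_RELEASE_VERSION": LATEST_RELEASE_VERSION,
}
```

A page such as `production/install.md` could then include a line like `This will install Zulip Server {{ LATEST_RELEASE_VERSION }}.`, which is re-rendered automatically whenever a release bumps `version.py`.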
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 #
2 # zulip-contributor-docs documentation build configuration file, created by
3 # sphinx-quickstart on Mon Aug 17 16:24:04 2015.
4 #
5 # This file is execfile()d with the current directory set to its
6 # containing dir.
7 #
8 # Note that not all possible configuration values are present in this
9 # autogenerated file.
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13 import os
14 import sys
15 from typing import Any, Dict, Optional
16
17 # If extensions (or modules to document with autodoc) are in another directory,
18 # add these directories to sys.path here. If the directory is relative to the
19 # documentation root, use os.path.abspath to make it absolute, like shown here.
20 # sys.path.insert(0, os.path.abspath('.'))
21
22 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
23 from version import ZULIP_VERSION
24
25 # -- General configuration ------------------------------------------------
26
27 # If your documentation needs a minimal Sphinx version, state it here.
28 # needs_sphinx = '1.0'
29
30 # Add any Sphinx extension module names here, as strings. They can be
31 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 # ones.
33 extensions = [
34 "myst_parser",
35 ]
36
37 myst_enable_extensions = [
38 "colon_fence",
39 ]
40
41 # Add any paths that contain templates here, relative to this directory.
42 templates_path = ["_templates"]
43
44 # The encoding of source files.
45 # source_encoding = 'utf-8-sig'
46
47 # The root toctree document.
48 root_doc = "index"
49
50 # General information about the project.
51 project = "Zulip"
52 copyright = "2012–2015 Dropbox, Inc., 2015–2021 Kandra Labs, Inc., and contributors"
53 author = "The Zulip Team"
54
55 # The version info for the project you're documenting, acts as replacement for
56 # |version| and |release|, also used in various other places throughout the
57 # built documents.
58 #
59 # The short X.Y version.
60 version = ZULIP_VERSION
61 # The full version, including alpha/beta/rc tags.
62 release = ZULIP_VERSION
63
64 # The language for content autogenerated by Sphinx. Refer to documentation
65 # for a list of supported languages.
66 #
67 # This is also used if you do content translation via gettext catalogs.
68 # Usually you set "language" from the command line for these cases.
69 language: Optional[str] = None
70
71 # There are two options for replacing |today|: either, you set today to some
72 # non-false value, then it is used:
73 # today = ''
74 # Else, today_fmt is used as the format for a strftime call.
75 # today_fmt = '%B %d, %Y'
76
77 # List of patterns, relative to source directory, that match files and
78 # directories to ignore when looking for source files.
79 exclude_patterns = ["_build", "README.md"]
80
81 # The reST default role (used for this markup: `text`) to use for all
82 # documents.
83 # default_role = None
84
85 # If true, '()' will be appended to :func: etc. cross-reference text.
86 # add_function_parentheses = True
87
88 # If true, the current module name will be prepended to all description
89 # unit titles (such as .. function::).
90 # add_module_names = True
91
92 # If true, sectionauthor and moduleauthor directives will be shown in the
93 # output. They are ignored by default.
94 # show_authors = False
95
96 # The name of the Pygments (syntax highlighting) style to use.
97 pygments_style = "sphinx"
98
99 # A list of ignored prefixes for module index sorting.
100 # modindex_common_prefix = []
101
102 # If true, keep warnings as "system message" paragraphs in the built documents.
103 # keep_warnings = False
104
105 # If true, `todo` and `todoList` produce output, else they produce nothing.
106 todo_include_todos = False
107
108
109 # -- Options for HTML output ----------------------------------------------
110
111 # The theme to use for HTML and HTML Help pages. See the documentation for
112 # a list of builtin themes.
113
114 # Read The Docs can't import sphinx_rtd_theme, so don't import it there.
115 on_rtd = os.environ.get("READTHEDOCS", None) == "True"
116
117 if not on_rtd:
118 import sphinx_rtd_theme
119
120 html_theme = "sphinx_rtd_theme"
121 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
122 else:
123 html_theme = "sphinx_rtd_theme"
124 html_style = None
125 html_theme_options = {"collapse_navigation": False}
126 using_rtd_theme = True
127
128 # Theme options are theme-specific and customize the look and feel of a theme
129 # further. For a list of options available for each theme, see the
130 # documentation.
131 html_theme_options = {"collapse_navigation": False, "logo_only": True}
132
133 # Add any paths that contain custom themes here, relative to this directory.
134 # html_theme_path = []
135
136 # The name for this set of Sphinx documents. If None, it defaults to
137 # "<project> v<release> documentation".
138 # html_title = None
139
140 # A shorter title for the navigation bar. Default is the same as html_title.
141 # html_short_title = None
142
143 # The name of an image file (relative to this directory) to place at the top
144 # of the sidebar.
145 html_logo = "images/zulip-logo.svg"
146
147 # The name of an image file (within the static path) to use as favicon of the
148 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
149 # pixels large.
150 # html_favicon = None
151
152 # Add any paths that contain custom static files (such as style sheets) here,
153 # relative to this directory. They are copied after the builtin static files,
154 # so a file named "default.css" will overwrite the builtin "default.css".
155 html_static_path = ["_static"]
156
157 # Add any extra paths that contain custom files (such as robots.txt or
158 # .htaccess) here, relative to this directory. These files are copied
159 # directly to the root of the documentation.
160 # html_extra_path = []
161
162 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
163 # using the given strftime format.
164 # html_last_updated_fmt = '%b %d, %Y'
165
166 # If true, SmartyPants will be used to convert quotes and dashes to
167 # typographically correct entities.
168 # html_use_smartypants = True
169
170 # Custom sidebar templates, maps document names to template names.
171 # html_sidebars = {}
172
173 # Additional templates that should be rendered to pages, maps page names to
174 # template names.
175 # html_additional_pages = {}
176
177 # If false, no module index is generated.
178 # html_domain_indices = True
179
180 # If false, no index is generated.
181 # html_use_index = True
182
183 # If true, the index is split into individual pages for each letter.
184 # html_split_index = False
185
186 # If true, links to the reST sources are added to the pages.
187 # html_show_sourcelink = True
188
189 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
190 # html_show_sphinx = True
191
192 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
193 # html_show_copyright = True
194
195 # If true, an OpenSearch description file will be output, and all pages will
196 # contain a <link> tag referring to it. The value of this option must be the
197 # base URL from which the finished HTML is served.
198 # html_use_opensearch = ''
199
200 # This is the file name suffix for HTML files (e.g. ".xhtml").
201 # html_file_suffix = None
202
203 # Language to be used for generating the HTML full-text search index.
204 # Sphinx supports the following languages:
205 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
206 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
207 # html_search_language = 'en'
208
209 # A dictionary with options for the search language support, empty by default.
210 # Now only 'ja' uses this config value
211 # html_search_options = {'type': 'default'}
212
213 # The name of a javascript file (relative to the configuration directory) that
214 # implements a search results scorer. If empty, the default will be used.
215 # html_search_scorer = 'scorer.js'
216
217 # Output file base name for HTML help builder.
218 htmlhelp_basename = "zulip-contributor-docsdoc"
219
220 # -- Options for LaTeX output ---------------------------------------------
221
222 latex_elements: Dict[str, str] = {
223 # The paper size ('letterpaper' or 'a4paper').
224 #'papersize': 'letterpaper',
225 # The font size ('10pt', '11pt' or '12pt').
226 #'pointsize': '10pt',
227 # Additional stuff for the LaTeX preamble.
228 #'preamble': '',
229 # Latex figure (float) alignment
230 #'figure_align': 'htbp',
231 }
232
233 # Grouping the document tree into LaTeX files. List of tuples
234 # (source start file, target name, title,
235 # author, documentclass [howto, manual, or own class]).
236 latex_documents = [
237 (root_doc, "zulip-contributor-docs.tex", "Zulip Documentation", "The Zulip Team", "manual"),
238 ]
239
240 # The name of an image file (relative to this directory) to place at the top of
241 # the title page.
242 # latex_logo = None
243
244 # For "manual" documents, if this is true, then toplevel headings are parts,
245 # not chapters.
246 # latex_use_parts = False
247
248 # If true, show page references after internal links.
249 # latex_show_pagerefs = False
250
251 # If true, show URL addresses after external links.
252 # latex_show_urls = False
253
254 # Documents to append as an appendix to all manuals.
255 # latex_appendices = []
256
257 # If false, no module index is generated.
258 # latex_domain_indices = True
259
260
261 # -- Options for manual page output ---------------------------------------
262
263 # One entry per manual page. List of tuples
264 # (source start file, name, description, authors, manual section).
265 man_pages = [
266 (root_doc, "zulip-contributor-docs", "Zulip Documentation", [author], 1),
267 ]
268
269 # If true, show URL addresses after external links.
270 # man_show_urls = False
271
272
273 # -- Options for Texinfo output -------------------------------------------
274
275 # Grouping the document tree into Texinfo files. List of tuples
276 # (source start file, target name, title, author,
277 # dir menu entry, description, category)
278 texinfo_documents = [
279 (
280 root_doc,
281 "zulip-contributor-docs",
282 "Zulip Documentation",
283 author,
284 "zulip-contributor-docs",
285 "Documentation for contributing to Zulip.",
286 "Miscellaneous",
287 ),
288 ]
289
290 # Documents to append as an appendix to all manuals.
291 # texinfo_appendices = []
292
293 # If false, no module index is generated.
294 # texinfo_domain_indices = True
295
296 # How to display URL addresses: 'footnote', 'no', or 'inline'.
297 # texinfo_show_urls = 'footnote'
298
299 # If true, do not generate a @detailmenu in the "Top" node's menu.
300 # texinfo_no_detailmenu = False
301
302 # The suffix(es) of source filenames. You can specify multiple suffix
303 # as a dictionary mapping file extensions to file types
304 # https://www.sphinx-doc.org/en/master/usage/markdown.html
305 source_suffix = {
306 ".rst": "restructuredtext",
307 ".md": "markdown",
308 }
309
310 suppress_warnings = [
311 "myst.header",
312 ]
313
314
315 def setup(app: Any) -> None:
316 # overrides for wide tables in RTD theme
317 app.add_css_file("theme_overrides.css") # path relative to _static
318
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,7 +20,7 @@
# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
-from version import ZULIP_VERSION
+from version import LATEST_RELEASE_VERSION, ZULIP_VERSION
# -- General configuration ------------------------------------------------
@@ -36,6 +36,7 @@
myst_enable_extensions = [
"colon_fence",
+ "substitution",
]
# Add any paths that contain templates here, relative to this directory.
@@ -105,6 +106,10 @@
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
+myst_substitutions = {
+ "LATEST_RELEASE_VERSION": LATEST_RELEASE_VERSION,
+}
+
# -- Options for HTML output ----------------------------------------------
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -20,7 +20,7 @@\n # sys.path.insert(0, os.path.abspath('.'))\n \n sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\n-from version import ZULIP_VERSION\n+from version import LATEST_RELEASE_VERSION, ZULIP_VERSION\n \n # -- General configuration ------------------------------------------------\n \n@@ -36,6 +36,7 @@\n \n myst_enable_extensions = [\n \"colon_fence\",\n+ \"substitution\",\n ]\n \n # Add any paths that contain templates here, relative to this directory.\n@@ -105,6 +106,10 @@\n # If true, `todo` and `todoList` produce output, else they produce nothing.\n todo_include_todos = False\n \n+myst_substitutions = {\n+ \"LATEST_RELEASE_VERSION\": LATEST_RELEASE_VERSION,\n+}\n+\n \n # -- Options for HTML output ----------------------------------------------\n", "issue": "Indicate latest Zulip server version on ReadTheDocs installation and upgrade pages \nAt present, the Zulip installation and upgrade docs don't tell you what version of Zulip you'll be installing. While the installation/upgrade script doesn't require the user to know this, it would provide helpful context for someone who wants to understand what they are about to install.\r\n\r\nWe should add a line to each of the following pages that indicates the version of Zulip server that will be installed. This information should be updated automatically when a new version is released.\r\n\r\nPages to change:\r\n* https://zulip.readthedocs.io/en/stable/production/upgrade-or-modify.html\r\n* https://zulip.readthedocs.io/en/stable/production/install.html\n", "before_files": [{"content": "#\n# zulip-contributor-docs documentation build configuration file, created by\n# sphinx-quickstart on Mon Aug 17 16:24:04 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\nimport os\nimport sys\nfrom typing import Any, Dict, Optional\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\nfrom version import ZULIP_VERSION\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"myst_parser\",\n]\n\nmyst_enable_extensions = [\n \"colon_fence\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The root toctree document.\nroot_doc = \"index\"\n\n# General information about the project.\nproject = \"Zulip\"\ncopyright = \"2012\u20132015 Dropbox, Inc., 2015\u20132021 Kandra Labs, Inc., and contributors\"\nauthor = \"The Zulip Team\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = ZULIP_VERSION\n# The full version, including alpha/beta/rc tags.\nrelease = ZULIP_VERSION\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage: Optional[str] = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"README.md\"]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\n# Read The Docs can't import sphinx_rtd_theme, so don't import it there.\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\nif not on_rtd:\n import sphinx_rtd_theme\n\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nelse:\n html_theme = \"sphinx_rtd_theme\"\n html_style = None\n html_theme_options = {\"collapse_navigation\": False}\n using_rtd_theme = True\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\"collapse_navigation\": False, \"logo_only\": True}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. 
If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"images/zulip-logo.svg\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. 
If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"zulip-contributor-docsdoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements: Dict[str, str] = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n # Latex figure (float) alignment\n #'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (root_doc, \"zulip-contributor-docs.tex\", \"Zulip Documentation\", \"The Zulip Team\", \"manual\"),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (root_doc, \"zulip-contributor-docs\", \"Zulip Documentation\", [author], 1),\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n root_doc,\n \"zulip-contributor-docs\",\n \"Zulip Documentation\",\n author,\n \"zulip-contributor-docs\",\n \"Documentation for contributing to Zulip.\",\n \"Miscellaneous\",\n ),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n\n# The suffix(es) of source filenames. 
You can specify multiple suffix\n# as a dictionary mapping file extensions to file types\n# https://www.sphinx-doc.org/en/master/usage/markdown.html\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\nsuppress_warnings = [\n \"myst.header\",\n]\n\n\ndef setup(app: Any) -> None:\n # overrides for wide tables in RTD theme\n app.add_css_file(\"theme_overrides.css\") # path relative to _static\n", "path": "docs/conf.py"}], "after_files": [{"content": "#\n# zulip-contributor-docs documentation build configuration file, created by\n# sphinx-quickstart on Mon Aug 17 16:24:04 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\nimport os\nimport sys\nfrom typing import Any, Dict, Optional\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n# sys.path.insert(0, os.path.abspath('.'))\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\nfrom version import LATEST_RELEASE_VERSION, ZULIP_VERSION\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"myst_parser\",\n]\n\nmyst_enable_extensions = [\n \"colon_fence\",\n \"substitution\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The encoding of source files.\n# source_encoding = 'utf-8-sig'\n\n# The root toctree document.\nroot_doc = \"index\"\n\n# General information about the project.\nproject = \"Zulip\"\ncopyright = \"2012\u20132015 Dropbox, Inc., 2015\u20132021 Kandra Labs, Inc., and contributors\"\nauthor = \"The Zulip Team\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = ZULIP_VERSION\n# The full version, including alpha/beta/rc tags.\nrelease = ZULIP_VERSION\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage: Optional[str] = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n# today = ''\n# Else, today_fmt is used as the format for a strftime call.\n# today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = [\"_build\", \"README.md\"]\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n# default_role = None\n\n# If true, '()' will be appended to :func: etc. 
cross-reference text.\n# add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n# add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n# show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\n# modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n# keep_warnings = False\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\nmyst_substitutions = {\n \"LATEST_RELEASE_VERSION\": LATEST_RELEASE_VERSION,\n}\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n\n# Read The Docs can't import sphinx_rtd_theme, so don't import it there.\non_rtd = os.environ.get(\"READTHEDOCS\", None) == \"True\"\n\nif not on_rtd:\n import sphinx_rtd_theme\n\n html_theme = \"sphinx_rtd_theme\"\n html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\nelse:\n html_theme = \"sphinx_rtd_theme\"\n html_style = None\n html_theme_options = {\"collapse_navigation\": False}\n using_rtd_theme = True\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\"collapse_navigation\": False, \"logo_only\": True}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n# html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n# html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\nhtml_logo = \"images/zulip-logo.svg\"\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n# html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n# html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n# html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n# html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n# html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n# html_additional_pages = {}\n\n# If false, no module index is generated.\n# html_domain_indices = True\n\n# If false, no index is generated.\n# html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n# html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n# html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n# html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n# html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n# html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n# html_file_suffix = None\n\n# Language to be used for generating the HTML full-text search index.\n# Sphinx supports the following languages:\n# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'\n# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'\n# html_search_language = 'en'\n\n# A dictionary with options for the search language support, empty by default.\n# Now only 'ja' uses this config value\n# html_search_options = {'type': 'default'}\n\n# The name of a javascript file (relative to the configuration directory) that\n# implements a search results scorer. If empty, the default will be used.\n# html_search_scorer = 'scorer.js'\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"zulip-contributor-docsdoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements: Dict[str, str] = {\n # The paper size ('letterpaper' or 'a4paper').\n #'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #'preamble': '',\n # Latex figure (float) alignment\n #'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (root_doc, \"zulip-contributor-docs.tex\", \"Zulip Documentation\", \"The Zulip Team\", \"manual\"),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n# latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n# latex_use_parts = False\n\n# If true, show page references after internal links.\n# latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n# latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n# latex_appendices = []\n\n# If false, no module index is generated.\n# latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (root_doc, \"zulip-contributor-docs\", \"Zulip Documentation\", [author], 1),\n]\n\n# If true, show URL addresses after external links.\n# man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n root_doc,\n \"zulip-contributor-docs\",\n \"Zulip Documentation\",\n author,\n \"zulip-contributor-docs\",\n \"Documentation for contributing to Zulip.\",\n \"Miscellaneous\",\n ),\n]\n\n# Documents to append as an appendix to all manuals.\n# texinfo_appendices = []\n\n# If false, no module index is generated.\n# texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n# texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n# texinfo_no_detailmenu = False\n\n# The suffix(es) of source filenames. You can specify multiple suffix\n# as a dictionary mapping file extensions to file types\n# https://www.sphinx-doc.org/en/master/usage/markdown.html\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n \".md\": \"markdown\",\n}\n\nsuppress_warnings = [\n \"myst.header\",\n]\n\n\ndef setup(app: Any) -> None:\n # overrides for wide tables in RTD theme\n app.add_css_file(\"theme_overrides.css\") # path relative to _static\n", "path": "docs/conf.py"}]} | 3,907 | 216 |
gh_patches_debug_9378 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-346 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New user creation results in TypeError
If one wants to create a new user via the network settings, an error will occur. The user gets created anyway, but this should be fixed quickly.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cms/forms/users/user_profile_form.py`
Content:
```
1 """
2 Form for creating a user object
3 """
4 import logging
5
6 from django import forms
7
8 from ...models import UserProfile
9
10
11 logger = logging.getLogger(__name__)
12
13
14 class UserProfileForm(forms.ModelForm):
15
16 class Meta:
17 model = UserProfile
18 fields = [
19 'regions',
20 'organization'
21 ]
22
23 # pylint: disable=arguments-differ
24 def save(self, *args, **kwargs):
25
26 logger.info(
27 'UserProfileForm saved with args %s and kwargs %s',
28 args,
29 kwargs
30 )
31
32 # pop kwarg to make sure the super class does not get this param
33 user = kwargs.pop('user', None)
34
35 if not self.instance.id:
36 # don't commit saving of ModelForm, because required user field is still missing
37 kwargs['commit'] = False
38
39 # save ModelForm
40 user_profile = super(UserProfileForm, self).save(*args, **kwargs)
41
42 if not self.instance.id:
43 user_profile.user = user
44 user_profile.save()
45 # check if called from UserProfileForm or RegionUserProfileForm
46 if 'regions' in self.cleaned_data:
47 # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly
48 user_profile.regions = self.cleaned_data['regions']
49 user_profile.save()
50
51 return user_profile
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cms/forms/users/user_profile_form.py b/src/cms/forms/users/user_profile_form.py
--- a/src/cms/forms/users/user_profile_form.py
+++ b/src/cms/forms/users/user_profile_form.py
@@ -45,7 +45,6 @@
# check if called from UserProfileForm or RegionUserProfileForm
if 'regions' in self.cleaned_data:
# regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly
- user_profile.regions = self.cleaned_data['regions']
- user_profile.save()
+ user_profile.regions.set(self.cleaned_data['regions'])
return user_profile
| {"golden_diff": "diff --git a/src/cms/forms/users/user_profile_form.py b/src/cms/forms/users/user_profile_form.py\n--- a/src/cms/forms/users/user_profile_form.py\n+++ b/src/cms/forms/users/user_profile_form.py\n@@ -45,7 +45,6 @@\n # check if called from UserProfileForm or RegionUserProfileForm\n if 'regions' in self.cleaned_data:\n # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly\n- user_profile.regions = self.cleaned_data['regions']\n- user_profile.save()\n+ user_profile.regions.set(self.cleaned_data['regions'])\n \n return user_profile\n", "issue": "New user creation results in TypeError\nIf one wants to create a new user via the network settings an error will occur. The user gets created anyway, but this should be fixed quite fast.\r\n\r\n\r\n\nNew user creation results in TypeError\nIf one wants to create a new user via the network settings an error will occur. The user gets created anyway, but this should be fixed quite fast.\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nForm for creating a user object\n\"\"\"\nimport logging\n\nfrom django import forms\n\nfrom ...models import UserProfile\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserProfileForm(forms.ModelForm):\n\n class Meta:\n model = UserProfile\n fields = [\n 'regions',\n 'organization'\n ]\n\n # pylint: disable=arguments-differ\n def save(self, *args, **kwargs):\n\n logger.info(\n 'UserProfileForm saved with args %s and kwargs %s',\n args,\n kwargs\n )\n\n # pop kwarg to make sure the super class does not get this param\n user = kwargs.pop('user', None)\n\n if not self.instance.id:\n # don't commit saving of ModelForm, because required user field is still missing\n kwargs['commit'] = False\n\n # save ModelForm\n user_profile = super(UserProfileForm, self).save(*args, **kwargs)\n\n if not self.instance.id:\n user_profile.user = user\n user_profile.save()\n # check if called from UserProfileForm or RegionUserProfileForm\n if 'regions' in self.cleaned_data:\n # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly\n user_profile.regions = self.cleaned_data['regions']\n user_profile.save()\n\n return user_profile\n", "path": "src/cms/forms/users/user_profile_form.py"}], "after_files": [{"content": "\"\"\"\nForm for creating a user object\n\"\"\"\nimport logging\n\nfrom django import forms\n\nfrom ...models import UserProfile\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass UserProfileForm(forms.ModelForm):\n\n class Meta:\n model = UserProfile\n fields = [\n 'regions',\n 'organization'\n ]\n\n # pylint: disable=arguments-differ\n def save(self, *args, **kwargs):\n\n logger.info(\n 'UserProfileForm saved with args %s and kwargs %s',\n args,\n kwargs\n )\n\n # pop kwarg to make sure the super class does not get this param\n user = kwargs.pop('user', None)\n\n if not self.instance.id:\n # don't commit saving of ModelForm, because required user field is still missing\n kwargs['commit'] = False\n\n # save ModelForm\n user_profile = super(UserProfileForm, self).save(*args, **kwargs)\n\n if not self.instance.id:\n user_profile.user = user\n user_profile.save()\n # check if called from UserProfileForm or RegionUserProfileForm\n if 'regions' in self.cleaned_data:\n # regions can't be saved if commit=False on the ModelForm, so we have to save them explicitly\n user_profile.regions.set(self.cleaned_data['regions'])\n\n return user_profile\n", "path": "src/cms/forms/users/user_profile_form.py"}]} | 861 | 140 |
gh_patches_debug_321 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-5424 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove possibly unused constant
At first sight it looks like it isn't used anymore after https://github.com/rtfd/readthedocs.org/pull/5383
https://github.com/rtfd/readthedocs.org/blob/78c34c904b347110b2cd545b4b5a80ed526590f7/readthedocs/core/models.py#L13-L13
We should still double check and make sure tests are passing after the removal.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/core/models.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Models for the core app."""
4 import logging
5
6 from annoying.fields import AutoOneToOneField
7 from django.db import models
8 from django.urls import reverse
9 from django.utils.translation import ugettext
10 from django.utils.translation import ugettext_lazy as _
11
12
13 STANDARD_EMAIL = '[email protected]'
14
15 log = logging.getLogger(__name__)
16
17
18 class UserProfile(models.Model):
19
20 """Additional information about a User."""
21
22 user = AutoOneToOneField(
23 'auth.User',
24 verbose_name=_('User'),
25 related_name='profile',
26 )
27 whitelisted = models.BooleanField(_('Whitelisted'), default=False)
28 banned = models.BooleanField(_('Banned'), default=False)
29 homepage = models.CharField(_('Homepage'), max_length=100, blank=True)
30 allow_ads = models.BooleanField(
31 _('See paid advertising'),
32 help_text=_('If unchecked, you will still see community ads.'),
33 default=True,
34 )
35
36 def __str__(self):
37 return (
38 ugettext("%(username)s's profile") %
39 {'username': self.user.username}
40 )
41
42 def get_absolute_url(self):
43 return reverse(
44 'profiles_profile_detail',
45 kwargs={'username': self.user.username},
46 )
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/core/models.py b/readthedocs/core/models.py
--- a/readthedocs/core/models.py
+++ b/readthedocs/core/models.py
@@ -10,8 +10,6 @@
from django.utils.translation import ugettext_lazy as _
-STANDARD_EMAIL = '[email protected]'
-
log = logging.getLogger(__name__)
| {"golden_diff": "diff --git a/readthedocs/core/models.py b/readthedocs/core/models.py\n--- a/readthedocs/core/models.py\n+++ b/readthedocs/core/models.py\n@@ -10,8 +10,6 @@\n from django.utils.translation import ugettext_lazy as _\n \n \n-STANDARD_EMAIL = '[email protected]'\n-\n log = logging.getLogger(__name__)\n", "issue": "Remove possibel unused constant\nAt first sight looks like isn't used anymore after https://github.com/rtfd/readthedocs.org/pull/5383\r\n\r\nhttps://github.com/rtfd/readthedocs.org/blob/78c34c904b347110b2cd545b4b5a80ed526590f7/readthedocs/core/models.py#L13-L13\r\n\r\nWe should still double check and make sure tests are passing after the removal.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Models for the core app.\"\"\"\nimport logging\n\nfrom annoying.fields import AutoOneToOneField\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext\nfrom django.utils.translation import ugettext_lazy as _\n\n\nSTANDARD_EMAIL = '[email protected]'\n\nlog = logging.getLogger(__name__)\n\n\nclass UserProfile(models.Model):\n\n \"\"\"Additional information about a User.\"\"\"\n\n user = AutoOneToOneField(\n 'auth.User',\n verbose_name=_('User'),\n related_name='profile',\n )\n whitelisted = models.BooleanField(_('Whitelisted'), default=False)\n banned = models.BooleanField(_('Banned'), default=False)\n homepage = models.CharField(_('Homepage'), max_length=100, blank=True)\n allow_ads = models.BooleanField(\n _('See paid advertising'),\n help_text=_('If unchecked, you will still see community ads.'),\n default=True,\n )\n\n def __str__(self):\n return (\n ugettext(\"%(username)s's profile\") %\n {'username': self.user.username}\n )\n\n def get_absolute_url(self):\n return reverse(\n 'profiles_profile_detail',\n kwargs={'username': self.user.username},\n )\n", "path": "readthedocs/core/models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Models for the core app.\"\"\"\nimport logging\n\nfrom annoying.fields import AutoOneToOneField\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.translation import ugettext\nfrom django.utils.translation import ugettext_lazy as _\n\n\nlog = logging.getLogger(__name__)\n\n\nclass UserProfile(models.Model):\n\n \"\"\"Additional information about a User.\"\"\"\n\n user = AutoOneToOneField(\n 'auth.User',\n verbose_name=_('User'),\n related_name='profile',\n )\n whitelisted = models.BooleanField(_('Whitelisted'), default=False)\n banned = models.BooleanField(_('Banned'), default=False)\n homepage = models.CharField(_('Homepage'), max_length=100, blank=True)\n allow_ads = models.BooleanField(\n _('See paid advertising'),\n help_text=_('If unchecked, you will still see community ads.'),\n default=True,\n )\n\n def __str__(self):\n return (\n ugettext(\"%(username)s's profile\") %\n {'username': self.user.username}\n )\n\n def get_absolute_url(self):\n return reverse(\n 'profiles_profile_detail',\n kwargs={'username': self.user.username},\n )\n", "path": "readthedocs/core/models.py"}]} | 723 | 79 |
gh_patches_debug_1133 | rasdani/github-patches | git_diff | joke2k__faker-512 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using É, é (e-acute) in emails.
It looks like É, é (e-acute) symbols are not appropriate for a valid email. I used https://pypi.python.org/pypi/robotframework-faker/, which uses this library, and the following email was returned:
andré[email protected]
But email verification failed for this email.
Could you remove É, é and other such letters from valid email generation if they are present?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/internet/de_DE/__init__.py`
Content:
```
1 # coding=utf-8
2
3 from __future__ import unicode_literals
4 from .. import Provider as InternetProvider
5
6 class Provider(InternetProvider):
7
8 free_email_domains = (
9 'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',
10 'web.de', 'yahoo.de',
11 )
12 tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )
13
14 replacements = (
15 ('ä', 'ae'), ('Ä', 'Ae'),
16 ('ö', 'oe'), ('Ö', 'Oe'),
17 ('ü', 'ue'), ('Ü', 'Ue'),
18 ('ß', 'ss'),
19 )
20
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/providers/internet/de_DE/__init__.py b/faker/providers/internet/de_DE/__init__.py
--- a/faker/providers/internet/de_DE/__init__.py
+++ b/faker/providers/internet/de_DE/__init__.py
@@ -15,5 +15,7 @@
('ä', 'ae'), ('Ä', 'Ae'),
('ö', 'oe'), ('Ö', 'Oe'),
('ü', 'ue'), ('Ü', 'Ue'),
+ ('é', 'e'), ('É', 'E'),
+ ('à', 'a'), ('À', 'A'),
('ß', 'ss'),
)
| {"golden_diff": "diff --git a/faker/providers/internet/de_DE/__init__.py b/faker/providers/internet/de_DE/__init__.py\n--- a/faker/providers/internet/de_DE/__init__.py\n+++ b/faker/providers/internet/de_DE/__init__.py\n@@ -15,5 +15,7 @@\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n+ ('\u00e9', 'e'), ('\u00c9', 'E'),\n+ ('\u00e0', 'a'), ('\u00c0', 'A'),\n ('\u00df', 'ss'),\n )\n", "issue": "Using \u00c9, \u00e9 (e-acute) in emails.\nIt looks that \u00c9, \u00e9 (e-acute) symbols are not appropriate for valid email. I used https://pypi.python.org/pypi/robotframework-faker/ which uses this library and the following email was returned: \r\nandr\[email protected]\r\n\r\nBut email verification was failed for this email. \r\nCould you remove \u00c9, \u00e9 and other such letters if they are present from valid email generation?\n", "before_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',\n 'web.de', 'yahoo.de',\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )\n\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00df', 'ss'),\n )\n", "path": "faker/providers/internet/de_DE/__init__.py"}], "after_files": [{"content": "# coding=utf-8\n\nfrom __future__ import unicode_literals\nfrom .. import Provider as InternetProvider\n\nclass Provider(InternetProvider):\n\n free_email_domains = (\n 'aol.de', 'gmail.com', 'gmx.de', 'googlemail.com', 'hotmail.de',\n 'web.de', 'yahoo.de',\n )\n tlds = ('com', 'com', 'com', 'net', 'org', 'de', 'de', 'de', )\n\n replacements = (\n ('\u00e4', 'ae'), ('\u00c4', 'Ae'),\n ('\u00f6', 'oe'), ('\u00d6', 'Oe'),\n ('\u00fc', 'ue'), ('\u00dc', 'Ue'),\n ('\u00e9', 'e'), ('\u00c9', 'E'),\n ('\u00e0', 'a'), ('\u00c0', 'A'),\n ('\u00df', 'ss'),\n )\n", "path": "faker/providers/internet/de_DE/__init__.py"}]} | 550 | 147 |
gh_patches_debug_32914 | rasdani/github-patches | git_diff | getpelican__pelican-2440 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Category/Tag/Author slugs are not settable
URLWrapper objects have a setter for their 'slug' property, but all of the concrete URLWrapper subclasses override the _getter_ for 'slug', which, because of the way Python's property accessors work, makes the setter inaccessible. This breaks the 'category_meta' plugin and probably other things as well.
--- END ISSUE ---
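The behaviour described here is ordinary Python property mechanics rather than anything Pelican-specific: a subclass that redefines only the getter creates a brand-new property object with no setter, hiding the one inherited from the base class. The sketch below reproduces that with hypothetical `Base`/`Child` classes rather than Pelican's real ones.

```python
class Base:
    def __init__(self):
        self._slug = None

    @property
    def slug(self):
        return self._slug or "derived-from-name"

    @slug.setter
    def slug(self, value):
        self._slug = value


class Child(Base):
    @property            # a brand-new property object with no setter
    def slug(self):
        return self._slug or "child-specific-default"


base = Base()
base.slug = "explicit"   # fine: Base.slug still has its setter
print(base.slug)         # explicit

child = Child()
try:
    child.slug = "explicit"
except AttributeError as exc:
    print(exc)           # e.g. "property 'slug' of 'Child' object has no setter"
print(child.slug)        # child-specific-default
```

The golden diff for this record resolves the problem by moving the per-class `*_REGEX_SUBSTITUTIONS` lookup into the base getter and reducing the subclasses to `pass`, so the inherited setter becomes reachable again.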
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/urlwrappers.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 import functools
5 import logging
6 import os
7
8 import six
9
10 from pelican.utils import python_2_unicode_compatible, slugify
11
12 logger = logging.getLogger(__name__)
13
14
15 @python_2_unicode_compatible
16 @functools.total_ordering
17 class URLWrapper(object):
18 def __init__(self, name, settings):
19 self.settings = settings
20 self._name = name
21 self._slug = None
22 self._slug_from_name = True
23
24 @property
25 def name(self):
26 return self._name
27
28 @name.setter
29 def name(self, name):
30 self._name = name
31 # if slug wasn't explicitly set, it needs to be regenerated from name
32 # so, changing name should reset slug for slugification
33 if self._slug_from_name:
34 self._slug = None
35
36 @property
37 def slug(self):
38 if self._slug is None:
39 self._slug = slugify(
40 self.name,
41 regex_subs=self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))
42 return self._slug
43
44 @slug.setter
45 def slug(self, slug):
46 # if slug is expliticly set, changing name won't alter slug
47 self._slug_from_name = False
48 self._slug = slug
49
50 def as_dict(self):
51 d = self.__dict__
52 d['name'] = self.name
53 d['slug'] = self.slug
54 return d
55
56 def __hash__(self):
57 return hash(self.slug)
58
59 def _normalize_key(self, key):
60 subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
61 return six.text_type(slugify(key, regex_subs=subs))
62
63 def __eq__(self, other):
64 if isinstance(other, self.__class__):
65 return self.slug == other.slug
66 if isinstance(other, six.text_type):
67 return self.slug == self._normalize_key(other)
68 return False
69
70 def __ne__(self, other):
71 if isinstance(other, self.__class__):
72 return self.slug != other.slug
73 if isinstance(other, six.text_type):
74 return self.slug != self._normalize_key(other)
75 return True
76
77 def __lt__(self, other):
78 if isinstance(other, self.__class__):
79 return self.slug < other.slug
80 if isinstance(other, six.text_type):
81 return self.slug < self._normalize_key(other)
82 return False
83
84 def __str__(self):
85 return self.name
86
87 def __repr__(self):
88 return '<{} {}>'.format(type(self).__name__, repr(self._name))
89
90 def _from_settings(self, key, get_page_name=False):
91 """Returns URL information as defined in settings.
92
93 When get_page_name=True returns URL without anything after {slug} e.g.
94 if in settings: CATEGORY_URL="cat/{slug}.html" this returns
95 "cat/{slug}" Useful for pagination.
96
97 """
98 setting = "%s_%s" % (self.__class__.__name__.upper(), key)
99 value = self.settings[setting]
100 if not isinstance(value, six.string_types):
101 logger.warning('%s is set to %s', setting, value)
102 return value
103 else:
104 if get_page_name:
105 return os.path.splitext(value)[0].format(**self.as_dict())
106 else:
107 return value.format(**self.as_dict())
108
109 page_name = property(functools.partial(_from_settings, key='URL',
110 get_page_name=True))
111 url = property(functools.partial(_from_settings, key='URL'))
112 save_as = property(functools.partial(_from_settings, key='SAVE_AS'))
113
114
115 class Category(URLWrapper):
116 @property
117 def slug(self):
118 if self._slug is None:
119 if 'CATEGORY_REGEX_SUBSTITUTIONS' in self.settings:
120 subs = self.settings['CATEGORY_REGEX_SUBSTITUTIONS']
121 else:
122 subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
123 self._slug = slugify(self.name, regex_subs=subs)
124 return self._slug
125
126
127 class Tag(URLWrapper):
128 def __init__(self, name, *args, **kwargs):
129 super(Tag, self).__init__(name.strip(), *args, **kwargs)
130
131 @property
132 def slug(self):
133 if self._slug is None:
134 if 'TAG_REGEX_SUBSTITUTIONS' in self.settings:
135 subs = self.settings['TAG_REGEX_SUBSTITUTIONS']
136 else:
137 subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
138 self._slug = slugify(self.name, regex_subs=subs)
139 return self._slug
140
141
142 class Author(URLWrapper):
143 @property
144 def slug(self):
145 if self._slug is None:
146 if 'AUTHOR_REGEX_SUBSTITUTIONS' in self.settings:
147 subs = self.settings['AUTHOR_REGEX_SUBSTITUTIONS']
148 else:
149 subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
150 self._slug = slugify(self.name, regex_subs=subs)
151 return self._slug
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py
--- a/pelican/urlwrappers.py
+++ b/pelican/urlwrappers.py
@@ -36,9 +36,17 @@
@property
def slug(self):
if self._slug is None:
- self._slug = slugify(
- self.name,
- regex_subs=self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))
+ class_key = '{}_REGEX_SUBSTITUTIONS'.format(
+ self.__class__.__name__.upper())
+ if class_key in self.settings:
+ self._slug = slugify(
+ self.name,
+ regex_subs=self.settings[class_key])
+ else:
+ self._slug = slugify(
+ self.name,
+ regex_subs=self.settings.get(
+ 'SLUG_REGEX_SUBSTITUTIONS', []))
return self._slug
@slug.setter
@@ -113,39 +121,13 @@
class Category(URLWrapper):
- @property
- def slug(self):
- if self._slug is None:
- if 'CATEGORY_REGEX_SUBSTITUTIONS' in self.settings:
- subs = self.settings['CATEGORY_REGEX_SUBSTITUTIONS']
- else:
- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
- self._slug = slugify(self.name, regex_subs=subs)
- return self._slug
+ pass
class Tag(URLWrapper):
def __init__(self, name, *args, **kwargs):
super(Tag, self).__init__(name.strip(), *args, **kwargs)
- @property
- def slug(self):
- if self._slug is None:
- if 'TAG_REGEX_SUBSTITUTIONS' in self.settings:
- subs = self.settings['TAG_REGEX_SUBSTITUTIONS']
- else:
- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
- self._slug = slugify(self.name, regex_subs=subs)
- return self._slug
-
class Author(URLWrapper):
- @property
- def slug(self):
- if self._slug is None:
- if 'AUTHOR_REGEX_SUBSTITUTIONS' in self.settings:
- subs = self.settings['AUTHOR_REGEX_SUBSTITUTIONS']
- else:
- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])
- self._slug = slugify(self.name, regex_subs=subs)
- return self._slug
+ pass
| {"golden_diff": "diff --git a/pelican/urlwrappers.py b/pelican/urlwrappers.py\n--- a/pelican/urlwrappers.py\n+++ b/pelican/urlwrappers.py\n@@ -36,9 +36,17 @@\n @property\n def slug(self):\n if self._slug is None:\n- self._slug = slugify(\n- self.name,\n- regex_subs=self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))\n+ class_key = '{}_REGEX_SUBSTITUTIONS'.format(\n+ self.__class__.__name__.upper())\n+ if class_key in self.settings:\n+ self._slug = slugify(\n+ self.name,\n+ regex_subs=self.settings[class_key])\n+ else:\n+ self._slug = slugify(\n+ self.name,\n+ regex_subs=self.settings.get(\n+ 'SLUG_REGEX_SUBSTITUTIONS', []))\n return self._slug\n \n @slug.setter\n@@ -113,39 +121,13 @@\n \n \n class Category(URLWrapper):\n- @property\n- def slug(self):\n- if self._slug is None:\n- if 'CATEGORY_REGEX_SUBSTITUTIONS' in self.settings:\n- subs = self.settings['CATEGORY_REGEX_SUBSTITUTIONS']\n- else:\n- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n- self._slug = slugify(self.name, regex_subs=subs)\n- return self._slug\n+ pass\n \n \n class Tag(URLWrapper):\n def __init__(self, name, *args, **kwargs):\n super(Tag, self).__init__(name.strip(), *args, **kwargs)\n \n- @property\n- def slug(self):\n- if self._slug is None:\n- if 'TAG_REGEX_SUBSTITUTIONS' in self.settings:\n- subs = self.settings['TAG_REGEX_SUBSTITUTIONS']\n- else:\n- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n- self._slug = slugify(self.name, regex_subs=subs)\n- return self._slug\n-\n \n class Author(URLWrapper):\n- @property\n- def slug(self):\n- if self._slug is None:\n- if 'AUTHOR_REGEX_SUBSTITUTIONS' in self.settings:\n- subs = self.settings['AUTHOR_REGEX_SUBSTITUTIONS']\n- else:\n- subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n- self._slug = slugify(self.name, regex_subs=subs)\n- return self._slug\n+ pass\n", "issue": "Category/Tag/Author slugs are not settable\nURLWrapper objects have a setter for their 'slug' property, but all of the concrete URLWrapper subclasses override the _getter_ for 'slug', which, because of the way Python's property accessors work, makes the setter inaccessible. 
This breaks the 'category_meta' plugin and probably other things as well.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os\n\nimport six\n\nfrom pelican.utils import python_2_unicode_compatible, slugify\n\nlogger = logging.getLogger(__name__)\n\n\n@python_2_unicode_compatible\[email protected]_ordering\nclass URLWrapper(object):\n def __init__(self, name, settings):\n self.settings = settings\n self._name = name\n self._slug = None\n self._slug_from_name = True\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n self._name = name\n # if slug wasn't explicitly set, it needs to be regenerated from name\n # so, changing name should reset slug for slugification\n if self._slug_from_name:\n self._slug = None\n\n @property\n def slug(self):\n if self._slug is None:\n self._slug = slugify(\n self.name,\n regex_subs=self.settings.get('SLUG_REGEX_SUBSTITUTIONS', []))\n return self._slug\n\n @slug.setter\n def slug(self, slug):\n # if slug is expliticly set, changing name won't alter slug\n self._slug_from_name = False\n self._slug = slug\n\n def as_dict(self):\n d = self.__dict__\n d['name'] = self.name\n d['slug'] = self.slug\n return d\n\n def __hash__(self):\n return hash(self.slug)\n\n def _normalize_key(self, key):\n subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n return six.text_type(slugify(key, regex_subs=subs))\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.slug == other.slug\n if isinstance(other, six.text_type):\n return self.slug == self._normalize_key(other)\n return False\n\n def __ne__(self, other):\n if isinstance(other, self.__class__):\n return self.slug != other.slug\n if isinstance(other, six.text_type):\n return self.slug != self._normalize_key(other)\n return True\n\n def __lt__(self, other):\n if isinstance(other, self.__class__):\n return self.slug < other.slug\n if isinstance(other, six.text_type):\n return self.slug < self._normalize_key(other)\n return False\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return '<{} {}>'.format(type(self).__name__, repr(self._name))\n\n def _from_settings(self, key, get_page_name=False):\n \"\"\"Returns URL information as defined in settings.\n\n When get_page_name=True returns URL without anything after {slug} e.g.\n if in settings: CATEGORY_URL=\"cat/{slug}.html\" this returns\n \"cat/{slug}\" Useful for pagination.\n\n \"\"\"\n setting = \"%s_%s\" % (self.__class__.__name__.upper(), key)\n value = self.settings[setting]\n if not isinstance(value, six.string_types):\n logger.warning('%s is set to %s', setting, value)\n return value\n else:\n if get_page_name:\n return os.path.splitext(value)[0].format(**self.as_dict())\n else:\n return value.format(**self.as_dict())\n\n page_name = property(functools.partial(_from_settings, key='URL',\n get_page_name=True))\n url = property(functools.partial(_from_settings, key='URL'))\n save_as = property(functools.partial(_from_settings, key='SAVE_AS'))\n\n\nclass Category(URLWrapper):\n @property\n def slug(self):\n if self._slug is None:\n if 'CATEGORY_REGEX_SUBSTITUTIONS' in self.settings:\n subs = self.settings['CATEGORY_REGEX_SUBSTITUTIONS']\n else:\n subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n self._slug = slugify(self.name, regex_subs=subs)\n return self._slug\n\n\nclass Tag(URLWrapper):\n def __init__(self, name, *args, **kwargs):\n super(Tag, self).__init__(name.strip(), 
*args, **kwargs)\n\n @property\n def slug(self):\n if self._slug is None:\n if 'TAG_REGEX_SUBSTITUTIONS' in self.settings:\n subs = self.settings['TAG_REGEX_SUBSTITUTIONS']\n else:\n subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n self._slug = slugify(self.name, regex_subs=subs)\n return self._slug\n\n\nclass Author(URLWrapper):\n @property\n def slug(self):\n if self._slug is None:\n if 'AUTHOR_REGEX_SUBSTITUTIONS' in self.settings:\n subs = self.settings['AUTHOR_REGEX_SUBSTITUTIONS']\n else:\n subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n self._slug = slugify(self.name, regex_subs=subs)\n return self._slug\n", "path": "pelican/urlwrappers.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os\n\nimport six\n\nfrom pelican.utils import python_2_unicode_compatible, slugify\n\nlogger = logging.getLogger(__name__)\n\n\n@python_2_unicode_compatible\[email protected]_ordering\nclass URLWrapper(object):\n def __init__(self, name, settings):\n self.settings = settings\n self._name = name\n self._slug = None\n self._slug_from_name = True\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n self._name = name\n # if slug wasn't explicitly set, it needs to be regenerated from name\n # so, changing name should reset slug for slugification\n if self._slug_from_name:\n self._slug = None\n\n @property\n def slug(self):\n if self._slug is None:\n class_key = '{}_REGEX_SUBSTITUTIONS'.format(\n self.__class__.__name__.upper())\n if class_key in self.settings:\n self._slug = slugify(\n self.name,\n regex_subs=self.settings[class_key])\n else:\n self._slug = slugify(\n self.name,\n regex_subs=self.settings.get(\n 'SLUG_REGEX_SUBSTITUTIONS', []))\n return self._slug\n\n @slug.setter\n def slug(self, slug):\n # if slug is expliticly set, changing name won't alter slug\n self._slug_from_name = False\n self._slug = slug\n\n def as_dict(self):\n d = self.__dict__\n d['name'] = self.name\n d['slug'] = self.slug\n return d\n\n def __hash__(self):\n return hash(self.slug)\n\n def _normalize_key(self, key):\n subs = self.settings.get('SLUG_REGEX_SUBSTITUTIONS', [])\n return six.text_type(slugify(key, regex_subs=subs))\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.slug == other.slug\n if isinstance(other, six.text_type):\n return self.slug == self._normalize_key(other)\n return False\n\n def __ne__(self, other):\n if isinstance(other, self.__class__):\n return self.slug != other.slug\n if isinstance(other, six.text_type):\n return self.slug != self._normalize_key(other)\n return True\n\n def __lt__(self, other):\n if isinstance(other, self.__class__):\n return self.slug < other.slug\n if isinstance(other, six.text_type):\n return self.slug < self._normalize_key(other)\n return False\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return '<{} {}>'.format(type(self).__name__, repr(self._name))\n\n def _from_settings(self, key, get_page_name=False):\n \"\"\"Returns URL information as defined in settings.\n\n When get_page_name=True returns URL without anything after {slug} e.g.\n if in settings: CATEGORY_URL=\"cat/{slug}.html\" this returns\n \"cat/{slug}\" Useful for pagination.\n\n \"\"\"\n setting = \"%s_%s\" % (self.__class__.__name__.upper(), key)\n value = self.settings[setting]\n if not isinstance(value, six.string_types):\n logger.warning('%s is set to %s', setting, value)\n return value\n else:\n if 
get_page_name:\n return os.path.splitext(value)[0].format(**self.as_dict())\n else:\n return value.format(**self.as_dict())\n\n page_name = property(functools.partial(_from_settings, key='URL',\n get_page_name=True))\n url = property(functools.partial(_from_settings, key='URL'))\n save_as = property(functools.partial(_from_settings, key='SAVE_AS'))\n\n\nclass Category(URLWrapper):\n pass\n\n\nclass Tag(URLWrapper):\n def __init__(self, name, *args, **kwargs):\n super(Tag, self).__init__(name.strip(), *args, **kwargs)\n\n\nclass Author(URLWrapper):\n pass\n", "path": "pelican/urlwrappers.py"}]} | 1,798 | 567 |
gh_patches_debug_39819 | rasdani/github-patches | git_diff | localstack__localstack-1086 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
APIGateway AWS_PROXY integration failing for paths without parameters
I'm trying to run an AWS_PROXY integration between APIGateway and Lambda, and I'm hitting what looks like a fundamental logic bug in the APIGateway implementation.
https://github.com/localstack/localstack/blob/master/localstack/services/apigateway/apigateway_listener.py#L101
```
File "/opt/code/localstack/localstack/services/apigateway/apigateway_listener.py", line 101, in forward_request
'resourceId': resource.get('id'),
UnboundLocalError: local variable 'resource' referenced before assignment
```
Scanning up the file, it looks like `resource` is only initialized within a `catch` block. I assume this is a simple oversight rather than a complex bug.
This looks like a bug I can fix, but I'd appreciate any guidance you can offer.
--- END ISSUE ---
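The traceback is Python's standard failure mode for a name bound on only one branch: `resource` is assigned inside the exception-handling fallback, so any request that finds an exact integration match reaches the later `resource.get('id')` with the name unbound. The toy function below reproduces the shape of the bug; it is an illustration, not LocalStack code.

```python
# Toy reproduction of the traceback above: 'resource' is bound only inside the
# exception fallback, so the exact-match path hits the later reference with
# the name undefined.

def forward(path, exact_match_exists):
    try:
        if not exact_match_exists:
            raise LookupError(path)          # mimic the failed exact lookup
        integration = {"type": "AWS_PROXY"}  # exact match: fallback skipped
    except LookupError:
        resource = {"id": "abc123"}          # only ever bound here
        integration = {"type": "AWS_PROXY"}
    # Referenced unconditionally, just like the request_context build:
    return {"resourceId": resource.get("id"), "integration": integration}


print(forward("/pets/{petId}", exact_match_exists=False))  # fallback path: OK
try:
    print(forward("/pets", exact_match_exists=True))        # exact-match path
except UnboundLocalError as exc:
    print(exc)  # message wording varies by Python version
```

The fix shown further down this record removes the exact-match special case and always resolves `resource` through `get_resource_for_path`, so the name is bound on every path.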
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/apigateway/apigateway_listener.py`
Content:
```
1 import re
2 import logging
3 import json
4 import requests
5
6 from requests.models import Response
7 from flask import Response as FlaskResponse
8 from localstack.constants import APPLICATION_JSON, PATH_USER_REQUEST
9 from localstack.config import TEST_KINESIS_URL
10 from localstack.utils import common
11 from localstack.utils.aws import aws_stack
12 from localstack.utils.common import to_str
13 from localstack.services.awslambda import lambda_api
14 from localstack.services.kinesis import kinesis_listener
15 from localstack.services.generic_proxy import ProxyListener
16 from .helpers import (get_rest_api_paths, get_resource_for_path,
17 flask_to_requests_response, handle_authorizers,
18 extract_query_string_params, extract_path_params,
19 make_error, get_cors_response)
20
21 # set up logger
22 LOGGER = logging.getLogger(__name__)
23
24 # regex path patterns
25 PATH_REGEX_AUTHORIZERS = r'^/restapis/([A-Za-z0-9_\-]+)/authorizers(\?.*)?'
26
27
28 class ProxyListenerApiGateway(ProxyListener):
29
30 def forward_request(self, method, path, data, headers):
31 data = data and json.loads(to_str(data))
32
33 # Paths to match
34 regex2 = r'^/restapis/([A-Za-z0-9_\-]+)/([A-Za-z0-9_\-]+)/%s/(.*)$' % PATH_USER_REQUEST
35
36 if re.match(regex2, path):
37 search_match = re.search(regex2, path)
38 api_id = search_match.group(1)
39 stage = search_match.group(2)
40 relative_path = '/%s' % search_match.group(3)
41 try:
42 integration = aws_stack.get_apigateway_integration(api_id, method, path=relative_path)
43 assert integration
44 except Exception:
45 try:
46 integration = aws_stack.get_apigateway_integration(api_id, 'ANY', path=relative_path)
47 assert integration
48 except Exception:
49 # if we have no exact match, try to find an API resource that contains path parameters
50 path_map = get_rest_api_paths(rest_api_id=api_id)
51 try:
52 extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
53 except Exception:
54 return make_error('Unable to find path %s' % path, 404)
55
56 integrations = resource.get('resourceMethods', {})
57 integration = integrations.get(method, {})
58 if not integration:
59 integration = integrations.get('ANY', {})
60 integration = integration.get('methodIntegration')
61 if not integration:
62
63 if method == 'OPTIONS' and 'Origin' in headers:
64 # default to returning CORS headers if this is an OPTIONS request
65 return get_cors_response(headers)
66
67 return make_error('Unable to find integration for path %s' % path, 404)
68
69 uri = integration.get('uri')
70 if method == 'POST' and integration['type'] == 'AWS':
71 if uri.endswith('kinesis:action/PutRecords'):
72 template = integration['requestTemplates'][APPLICATION_JSON]
73 new_request = aws_stack.render_velocity_template(template, data)
74
75 # forward records to target kinesis stream
76 headers = aws_stack.mock_aws_request_headers(service='kinesis')
77 headers['X-Amz-Target'] = kinesis_listener.ACTION_PUT_RECORDS
78 result = common.make_http_request(url=TEST_KINESIS_URL,
79 method='POST', data=new_request, headers=headers)
80 return result
81 else:
82 msg = 'API Gateway action uri "%s" not yet implemented' % uri
83 LOGGER.warning(msg)
84 return make_error(msg, 404)
85
86 elif integration['type'] == 'AWS_PROXY':
87 if uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri:
88 func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]
89 account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]
90 data_str = json.dumps(data) if isinstance(data, dict) else data
91
92 relative_path, query_string_params = extract_query_string_params(path=relative_path)
93
94 source_ip = headers['X-Forwarded-For'].split(',')[-2]
95
96 # Sample request context:
97 # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test
98 request_context = {
99 'path': relative_path,
100 'accountId': account_id,
101 'resourceId': resource.get('id'),
102 'stage': stage,
103 'identity': {
104 'accountId': account_id,
105 'sourceIp': source_ip,
106 'userAgent': headers['User-Agent'],
107 }
108 }
109
110 try:
111 path_params = extract_path_params(path=relative_path, extracted_path=extracted_path)
112 except Exception:
113 path_params = {}
114
115 result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str,
116 headers, path_params=path_params, query_string_params=query_string_params,
117 method=method, resource_path=path, request_context=request_context)
118
119 if isinstance(result, FlaskResponse):
120 return flask_to_requests_response(result)
121
122 response = Response()
123 parsed_result = result if isinstance(result, dict) else json.loads(result)
124 parsed_result = common.json_safe(parsed_result)
125 response.status_code = int(parsed_result.get('statusCode', 200))
126 response.headers.update(parsed_result.get('headers', {}))
127 try:
128 if isinstance(parsed_result['body'], dict):
129 response._content = json.dumps(parsed_result['body'])
130 else:
131 response._content = parsed_result['body']
132 except Exception:
133 response._content = '{}'
134 return response
135 else:
136 msg = 'API Gateway action uri "%s" not yet implemented' % uri
137 LOGGER.warning(msg)
138 return make_error(msg, 404)
139
140 elif integration['type'] == 'HTTP':
141 function = getattr(requests, method.lower())
142 if isinstance(data, dict):
143 data = json.dumps(data)
144 result = function(integration['uri'], data=data, headers=headers)
145 return result
146
147 else:
148 msg = ('API Gateway integration type "%s" for method "%s" not yet implemented' %
149 (integration['type'], method))
150 LOGGER.warning(msg)
151 return make_error(msg, 404)
152
153 return 200
154
155 if re.match(PATH_REGEX_AUTHORIZERS, path):
156 return handle_authorizers(method, path, data, headers)
157
158 return True
159
160
161 # instantiate listener
162 UPDATE_APIGATEWAY = ProxyListenerApiGateway()
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/apigateway/apigateway_listener.py b/localstack/services/apigateway/apigateway_listener.py
--- a/localstack/services/apigateway/apigateway_listener.py
+++ b/localstack/services/apigateway/apigateway_listener.py
@@ -37,34 +37,28 @@
search_match = re.search(regex2, path)
api_id = search_match.group(1)
stage = search_match.group(2)
- relative_path = '/%s' % search_match.group(3)
+ relative_path_w_query_params = '/%s' % search_match.group(3)
+
+ relative_path, query_string_params = extract_query_string_params(path=relative_path_w_query_params)
+
+ path_map = get_rest_api_paths(rest_api_id=api_id)
try:
- integration = aws_stack.get_apigateway_integration(api_id, method, path=relative_path)
- assert integration
+ extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
except Exception:
- try:
- integration = aws_stack.get_apigateway_integration(api_id, 'ANY', path=relative_path)
- assert integration
- except Exception:
- # if we have no exact match, try to find an API resource that contains path parameters
- path_map = get_rest_api_paths(rest_api_id=api_id)
- try:
- extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)
- except Exception:
- return make_error('Unable to find path %s' % path, 404)
+ return make_error('Unable to find path %s' % path, 404)
- integrations = resource.get('resourceMethods', {})
- integration = integrations.get(method, {})
- if not integration:
- integration = integrations.get('ANY', {})
- integration = integration.get('methodIntegration')
- if not integration:
+ integrations = resource.get('resourceMethods', {})
+ integration = integrations.get(method, {})
+ if not integration:
+ integration = integrations.get('ANY', {})
+ integration = integration.get('methodIntegration')
+ if not integration:
- if method == 'OPTIONS' and 'Origin' in headers:
- # default to returning CORS headers if this is an OPTIONS request
- return get_cors_response(headers)
+ if method == 'OPTIONS' and 'Origin' in headers:
+ # default to returning CORS headers if this is an OPTIONS request
+ return get_cors_response(headers)
- return make_error('Unable to find integration for path %s' % path, 404)
+ return make_error('Unable to find integration for path %s' % path, 404)
uri = integration.get('uri')
if method == 'POST' and integration['type'] == 'AWS':
@@ -89,8 +83,6 @@
account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]
data_str = json.dumps(data) if isinstance(data, dict) else data
- relative_path, query_string_params = extract_query_string_params(path=relative_path)
-
source_ip = headers['X-Forwarded-For'].split(',')[-2]
# Sample request context:
| {"golden_diff": "diff --git a/localstack/services/apigateway/apigateway_listener.py b/localstack/services/apigateway/apigateway_listener.py\n--- a/localstack/services/apigateway/apigateway_listener.py\n+++ b/localstack/services/apigateway/apigateway_listener.py\n@@ -37,34 +37,28 @@\n search_match = re.search(regex2, path)\n api_id = search_match.group(1)\n stage = search_match.group(2)\n- relative_path = '/%s' % search_match.group(3)\n+ relative_path_w_query_params = '/%s' % search_match.group(3)\n+\n+ relative_path, query_string_params = extract_query_string_params(path=relative_path_w_query_params)\n+\n+ path_map = get_rest_api_paths(rest_api_id=api_id)\n try:\n- integration = aws_stack.get_apigateway_integration(api_id, method, path=relative_path)\n- assert integration\n+ extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)\n except Exception:\n- try:\n- integration = aws_stack.get_apigateway_integration(api_id, 'ANY', path=relative_path)\n- assert integration\n- except Exception:\n- # if we have no exact match, try to find an API resource that contains path parameters\n- path_map = get_rest_api_paths(rest_api_id=api_id)\n- try:\n- extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)\n- except Exception:\n- return make_error('Unable to find path %s' % path, 404)\n+ return make_error('Unable to find path %s' % path, 404)\n \n- integrations = resource.get('resourceMethods', {})\n- integration = integrations.get(method, {})\n- if not integration:\n- integration = integrations.get('ANY', {})\n- integration = integration.get('methodIntegration')\n- if not integration:\n+ integrations = resource.get('resourceMethods', {})\n+ integration = integrations.get(method, {})\n+ if not integration:\n+ integration = integrations.get('ANY', {})\n+ integration = integration.get('methodIntegration')\n+ if not integration:\n \n- if method == 'OPTIONS' and 'Origin' in headers:\n- # default to returning CORS headers if this is an OPTIONS request\n- return get_cors_response(headers)\n+ if method == 'OPTIONS' and 'Origin' in headers:\n+ # default to returning CORS headers if this is an OPTIONS request\n+ return get_cors_response(headers)\n \n- return make_error('Unable to find integration for path %s' % path, 404)\n+ return make_error('Unable to find integration for path %s' % path, 404)\n \n uri = integration.get('uri')\n if method == 'POST' and integration['type'] == 'AWS':\n@@ -89,8 +83,6 @@\n account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]\n data_str = json.dumps(data) if isinstance(data, dict) else data\n \n- relative_path, query_string_params = extract_query_string_params(path=relative_path)\n-\n source_ip = headers['X-Forwarded-For'].split(',')[-2]\n \n # Sample request context:\n", "issue": "APIGateway AWS_PROXY integration failing for paths without parameters\nI'm trying to run an AWS_PROXY integration between APIGateway and Lambda, and I'm hitting what looks like a fundamental logic bug in the APIGateway implementation.\r\n\r\nhttps://github.com/localstack/localstack/blob/master/localstack/services/apigateway/apigateway_listener.py#L101\r\n\r\n```\r\n File \"/opt/code/localstack/localstack/services/apigateway/apigateway_listener.py\", line 101, in forward_request\r\n 'resourceId': resource.get('id'),\r\nUnboundLocalError: local variable 'resource' referenced before assignment\r\n```\r\n\r\nScanning up the file, it looks like `resource` is only be initialized within a `catch` block. 
I assume this is a simple oversight rather than a complex bug.\r\n\r\nThis looks like a bug I can fix, but I'd appreciate any guidance you can offer.\n", "before_files": [{"content": "import re\nimport logging\nimport json\nimport requests\n\nfrom requests.models import Response\nfrom flask import Response as FlaskResponse\nfrom localstack.constants import APPLICATION_JSON, PATH_USER_REQUEST\nfrom localstack.config import TEST_KINESIS_URL\nfrom localstack.utils import common\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import to_str\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.kinesis import kinesis_listener\nfrom localstack.services.generic_proxy import ProxyListener\nfrom .helpers import (get_rest_api_paths, get_resource_for_path,\n flask_to_requests_response, handle_authorizers,\n extract_query_string_params, extract_path_params,\n make_error, get_cors_response)\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n# regex path patterns\nPATH_REGEX_AUTHORIZERS = r'^/restapis/([A-Za-z0-9_\\-]+)/authorizers(\\?.*)?'\n\n\nclass ProxyListenerApiGateway(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n data = data and json.loads(to_str(data))\n\n # Paths to match\n regex2 = r'^/restapis/([A-Za-z0-9_\\-]+)/([A-Za-z0-9_\\-]+)/%s/(.*)$' % PATH_USER_REQUEST\n\n if re.match(regex2, path):\n search_match = re.search(regex2, path)\n api_id = search_match.group(1)\n stage = search_match.group(2)\n relative_path = '/%s' % search_match.group(3)\n try:\n integration = aws_stack.get_apigateway_integration(api_id, method, path=relative_path)\n assert integration\n except Exception:\n try:\n integration = aws_stack.get_apigateway_integration(api_id, 'ANY', path=relative_path)\n assert integration\n except Exception:\n # if we have no exact match, try to find an API resource that contains path parameters\n path_map = get_rest_api_paths(rest_api_id=api_id)\n try:\n extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)\n except Exception:\n return make_error('Unable to find path %s' % path, 404)\n\n integrations = resource.get('resourceMethods', {})\n integration = integrations.get(method, {})\n if not integration:\n integration = integrations.get('ANY', {})\n integration = integration.get('methodIntegration')\n if not integration:\n\n if method == 'OPTIONS' and 'Origin' in headers:\n # default to returning CORS headers if this is an OPTIONS request\n return get_cors_response(headers)\n\n return make_error('Unable to find integration for path %s' % path, 404)\n\n uri = integration.get('uri')\n if method == 'POST' and integration['type'] == 'AWS':\n if uri.endswith('kinesis:action/PutRecords'):\n template = integration['requestTemplates'][APPLICATION_JSON]\n new_request = aws_stack.render_velocity_template(template, data)\n\n # forward records to target kinesis stream\n headers = aws_stack.mock_aws_request_headers(service='kinesis')\n headers['X-Amz-Target'] = kinesis_listener.ACTION_PUT_RECORDS\n result = common.make_http_request(url=TEST_KINESIS_URL,\n method='POST', data=new_request, headers=headers)\n return result\n else:\n msg = 'API Gateway action uri \"%s\" not yet implemented' % uri\n LOGGER.warning(msg)\n return make_error(msg, 404)\n\n elif integration['type'] == 'AWS_PROXY':\n if uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri:\n func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]\n account_id = 
uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]\n data_str = json.dumps(data) if isinstance(data, dict) else data\n\n relative_path, query_string_params = extract_query_string_params(path=relative_path)\n\n source_ip = headers['X-Forwarded-For'].split(',')[-2]\n\n # Sample request context:\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test\n request_context = {\n 'path': relative_path,\n 'accountId': account_id,\n 'resourceId': resource.get('id'),\n 'stage': stage,\n 'identity': {\n 'accountId': account_id,\n 'sourceIp': source_ip,\n 'userAgent': headers['User-Agent'],\n }\n }\n\n try:\n path_params = extract_path_params(path=relative_path, extracted_path=extracted_path)\n except Exception:\n path_params = {}\n\n result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str,\n headers, path_params=path_params, query_string_params=query_string_params,\n method=method, resource_path=path, request_context=request_context)\n\n if isinstance(result, FlaskResponse):\n return flask_to_requests_response(result)\n\n response = Response()\n parsed_result = result if isinstance(result, dict) else json.loads(result)\n parsed_result = common.json_safe(parsed_result)\n response.status_code = int(parsed_result.get('statusCode', 200))\n response.headers.update(parsed_result.get('headers', {}))\n try:\n if isinstance(parsed_result['body'], dict):\n response._content = json.dumps(parsed_result['body'])\n else:\n response._content = parsed_result['body']\n except Exception:\n response._content = '{}'\n return response\n else:\n msg = 'API Gateway action uri \"%s\" not yet implemented' % uri\n LOGGER.warning(msg)\n return make_error(msg, 404)\n\n elif integration['type'] == 'HTTP':\n function = getattr(requests, method.lower())\n if isinstance(data, dict):\n data = json.dumps(data)\n result = function(integration['uri'], data=data, headers=headers)\n return result\n\n else:\n msg = ('API Gateway integration type \"%s\" for method \"%s\" not yet implemented' %\n (integration['type'], method))\n LOGGER.warning(msg)\n return make_error(msg, 404)\n\n return 200\n\n if re.match(PATH_REGEX_AUTHORIZERS, path):\n return handle_authorizers(method, path, data, headers)\n\n return True\n\n\n# instantiate listener\nUPDATE_APIGATEWAY = ProxyListenerApiGateway()\n", "path": "localstack/services/apigateway/apigateway_listener.py"}], "after_files": [{"content": "import re\nimport logging\nimport json\nimport requests\n\nfrom requests.models import Response\nfrom flask import Response as FlaskResponse\nfrom localstack.constants import APPLICATION_JSON, PATH_USER_REQUEST\nfrom localstack.config import TEST_KINESIS_URL\nfrom localstack.utils import common\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import to_str\nfrom localstack.services.awslambda import lambda_api\nfrom localstack.services.kinesis import kinesis_listener\nfrom localstack.services.generic_proxy import ProxyListener\nfrom .helpers import (get_rest_api_paths, get_resource_for_path,\n flask_to_requests_response, handle_authorizers,\n extract_query_string_params, extract_path_params,\n make_error, get_cors_response)\n\n# set up logger\nLOGGER = logging.getLogger(__name__)\n\n# regex path patterns\nPATH_REGEX_AUTHORIZERS = r'^/restapis/([A-Za-z0-9_\\-]+)/authorizers(\\?.*)?'\n\n\nclass ProxyListenerApiGateway(ProxyListener):\n\n def forward_request(self, method, path, data, 
headers):\n data = data and json.loads(to_str(data))\n\n # Paths to match\n regex2 = r'^/restapis/([A-Za-z0-9_\\-]+)/([A-Za-z0-9_\\-]+)/%s/(.*)$' % PATH_USER_REQUEST\n\n if re.match(regex2, path):\n search_match = re.search(regex2, path)\n api_id = search_match.group(1)\n stage = search_match.group(2)\n relative_path_w_query_params = '/%s' % search_match.group(3)\n\n relative_path, query_string_params = extract_query_string_params(path=relative_path_w_query_params)\n\n path_map = get_rest_api_paths(rest_api_id=api_id)\n try:\n extracted_path, resource = get_resource_for_path(path=relative_path, path_map=path_map)\n except Exception:\n return make_error('Unable to find path %s' % path, 404)\n\n integrations = resource.get('resourceMethods', {})\n integration = integrations.get(method, {})\n if not integration:\n integration = integrations.get('ANY', {})\n integration = integration.get('methodIntegration')\n if not integration:\n\n if method == 'OPTIONS' and 'Origin' in headers:\n # default to returning CORS headers if this is an OPTIONS request\n return get_cors_response(headers)\n\n return make_error('Unable to find integration for path %s' % path, 404)\n\n uri = integration.get('uri')\n if method == 'POST' and integration['type'] == 'AWS':\n if uri.endswith('kinesis:action/PutRecords'):\n template = integration['requestTemplates'][APPLICATION_JSON]\n new_request = aws_stack.render_velocity_template(template, data)\n\n # forward records to target kinesis stream\n headers = aws_stack.mock_aws_request_headers(service='kinesis')\n headers['X-Amz-Target'] = kinesis_listener.ACTION_PUT_RECORDS\n result = common.make_http_request(url=TEST_KINESIS_URL,\n method='POST', data=new_request, headers=headers)\n return result\n else:\n msg = 'API Gateway action uri \"%s\" not yet implemented' % uri\n LOGGER.warning(msg)\n return make_error(msg, 404)\n\n elif integration['type'] == 'AWS_PROXY':\n if uri.startswith('arn:aws:apigateway:') and ':lambda:path' in uri:\n func_arn = uri.split(':lambda:path')[1].split('functions/')[1].split('/invocations')[0]\n account_id = uri.split(':lambda:path')[1].split(':function:')[0].split(':')[-1]\n data_str = json.dumps(data) if isinstance(data, dict) else data\n\n source_ip = headers['X-Forwarded-For'].split(',')[-2]\n\n # Sample request context:\n # https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-create-api-as-simple-proxy-for-lambda.html#api-gateway-create-api-as-simple-proxy-for-lambda-test\n request_context = {\n 'path': relative_path,\n 'accountId': account_id,\n 'resourceId': resource.get('id'),\n 'stage': stage,\n 'identity': {\n 'accountId': account_id,\n 'sourceIp': source_ip,\n 'userAgent': headers['User-Agent'],\n }\n }\n\n try:\n path_params = extract_path_params(path=relative_path, extracted_path=extracted_path)\n except Exception:\n path_params = {}\n\n result = lambda_api.process_apigateway_invocation(func_arn, relative_path, data_str,\n headers, path_params=path_params, query_string_params=query_string_params,\n method=method, resource_path=path, request_context=request_context)\n\n if isinstance(result, FlaskResponse):\n return flask_to_requests_response(result)\n\n response = Response()\n parsed_result = result if isinstance(result, dict) else json.loads(result)\n parsed_result = common.json_safe(parsed_result)\n response.status_code = int(parsed_result.get('statusCode', 200))\n response.headers.update(parsed_result.get('headers', {}))\n try:\n if isinstance(parsed_result['body'], dict):\n response._content = 
json.dumps(parsed_result['body'])\n else:\n response._content = parsed_result['body']\n except Exception:\n response._content = '{}'\n return response\n else:\n msg = 'API Gateway action uri \"%s\" not yet implemented' % uri\n LOGGER.warning(msg)\n return make_error(msg, 404)\n\n elif integration['type'] == 'HTTP':\n function = getattr(requests, method.lower())\n if isinstance(data, dict):\n data = json.dumps(data)\n result = function(integration['uri'], data=data, headers=headers)\n return result\n\n else:\n msg = ('API Gateway integration type \"%s\" for method \"%s\" not yet implemented' %\n (integration['type'], method))\n LOGGER.warning(msg)\n return make_error(msg, 404)\n\n return 200\n\n if re.match(PATH_REGEX_AUTHORIZERS, path):\n return handle_authorizers(method, path, data, headers)\n\n return True\n\n\n# instantiate listener\nUPDATE_APIGATEWAY = ProxyListenerApiGateway()\n", "path": "localstack/services/apigateway/apigateway_listener.py"}]} | 2,263 | 733 |
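For context, the traceback captured in this row (`UnboundLocalError: local variable 'resource' referenced before assignment`) comes from a common Python scoping pitfall: a name bound only inside an `except` branch is undefined whenever the happy path is taken. The sketch below is a minimal, self-contained illustration of that pitfall and of the shape of the fix applied in the golden diff; the helper names and sample data are hypothetical and are not part of the localstack code.

```
# Illustrative sketch only -- hypothetical helpers, not the localstack listener.

def exact_lookup(path, table):
    # Stand-in for an "exact match" lookup that raises KeyError on a miss.
    return table[path]


def buggy_handler(path, exact_table, fallback_resource):
    try:
        integration = exact_lookup(path, exact_table)
    except KeyError:
        # `resource` is only ever bound on this fallback branch...
        resource = fallback_resource
        integration = resource.get("methodIntegration")
    # ...so when the exact lookup succeeds, the reference below raises
    # UnboundLocalError, matching the traceback quoted in the issue.
    return {"resourceId": resource.get("id"), "integration": integration}


def fixed_handler(path, path_map):
    # Shape of the fix: resolve the resource unconditionally up front,
    # so every later reference to it is guaranteed to be defined.
    resource = path_map.get(path, {})
    integration = resource.get("resourceMethods", {}).get("ANY", {})
    return {"resourceId": resource.get("id"), "integration": integration}


if __name__ == "__main__":
    # The buggy handler only crashes when the try branch succeeds:
    try:
        buggy_handler("/pets", {"/pets": {"type": "AWS_PROXY"}}, {"id": "r-1"})
    except UnboundLocalError as exc:
        print("reproduced:", exc)
    print(fixed_handler("/pets", {"/pets": {"id": "r-1", "resourceMethods": {"ANY": {}}}}))
```

The golden diff in this row applies the same idea to the real listener: the path-to-resource resolution and the query-string extraction are hoisted out of the exception fallback so they run on every request before `resource` is referenced.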