problem_id (string, 18-22 chars) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-10.2k chars) | golden_diff (string, 151-4.94k chars) | verification_info (string, 582-21k chars) | num_tokens (int64, 271-2.05k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_41403 | rasdani/github-patches | git_diff | Pyomo__pyomo-498 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Appropriate behavior for activate() and deactivate() for nested disjunctions?
```
from pyomo.environ import *
m = ConcreteModel()
m.d1 = Disjunct()
m.d2 = Disjunct()
m.d1.sub1 = Disjunct()
m.d1.sub2 = Disjunct()
m.d1.disj = Disjunction(expr=[m.d1.sub1, m.d1.sub2])
m.disj = Disjunction(expr=[m.d1, m.d2])
```
What should happen when disjuncts containing nested disjunctions are deactivated? `m.d1.deactivate()`.
The reclassifier hack complains about `m.d1.sub1` and `m.d1.sub2` not being expanded.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyomo/gdp/plugins/gdp_var_mover.py`
Content:
```
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 """Collection of GDP-related hacks.
12
13 Hacks for dealing with the fact that solver writers may sometimes fail to
14 detect variables inside of Disjuncts or deactivated Blocks.
15 """
16
17 import logging
18 import textwrap
19 from pyomo.common.plugin import alias
20 from pyomo.core.base import Transformation, Block, Constraint
21 from pyomo.gdp import Disjunct
22
23 from six import itervalues
24
25 logger = logging.getLogger('pyomo.gdp')
26
27
28 class HACK_GDP_Var_Mover(Transformation):
29 """Move indicator vars to top block.
30
31 HACK: this will move all indicator variables on the model to the top block
32 so the writers can find them.
33
34 """
35
36 alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))
37
38 def _apply_to(self, instance, **kwds):
39 assert not kwds
40 count = 0
41 disjunct_generator = instance.component_data_objects(
42 Disjunct, descend_into=(Block, Disjunct))
43 for disjunct in disjunct_generator:
44 count += 1
45 var = disjunct.indicator_var
46 var.doc = "%s(Moved from %s)" % (
47 var.doc + " " if var.doc else "", var.name, )
48 disjunct.del_component(var)
49 instance.add_component("_gdp_moved_IV_%s" % (count,), var)
50
51
52 class HACK_GDP_Disjunct_Reclassifier(Transformation):
53 """Reclassify Disjuncts to Blocks.
54
55 HACK: this will reclassify all Disjuncts to Blocks so the current writers
56 can find the variables
57
58 """
59
60 alias('gdp.reclassify',
61 doc=textwrap.fill(textwrap.dedent(__doc__.strip())))
62
63 def _apply_to(self, instance, **kwds):
64 assert not kwds
65 disjunct_generator = instance.component_objects(
66 Disjunct, descend_into=(Block, Disjunct))
67 for disjunct_component in disjunct_generator:
68 for disjunct in itervalues(disjunct_component._data):
69 if disjunct.active:
70 logger.error("""
71 Reclassifying active Disjunct "%s" as a Block. This
72 is generally an error as it indicates that the model
73 was not completely relaxed before applying the
74 gdp.reclassify transformation""" % (disjunct.name,))
75
76 # Reclassify this disjunct as a block
77 disjunct_component.parent_block().reclassify_component_type(
78 disjunct_component, Block)
79 disjunct_component._activate_without_unfixing_indicator()
80
81 # Deactivate all constraints. Note that we only need to
82 # descend into blocks: we will catch disjuncts in the outer
83 # loop.
84 #
85 # Note that we defer this until AFTER we reactivate the
86 # block, as the component_objects generator will not
87 # return anything when active=True and the block is
88 # deactivated.
89 for disjunct in itervalues(disjunct_component._data):
90 cons_in_disjunct = disjunct.component_objects(
91 Constraint, descend_into=Block, active=True)
92 for con in cons_in_disjunct:
93 con.deactivate()
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyomo/gdp/plugins/gdp_var_mover.py b/pyomo/gdp/plugins/gdp_var_mover.py
--- a/pyomo/gdp/plugins/gdp_var_mover.py
+++ b/pyomo/gdp/plugins/gdp_var_mover.py
@@ -18,7 +18,9 @@
import textwrap
from pyomo.common.plugin import alias
from pyomo.core.base import Transformation, Block, Constraint
-from pyomo.gdp import Disjunct
+from pyomo.gdp import Disjunct, GDP_Error
+from pyomo.core import TraversalStrategy
+from pyomo.common.deprecation import deprecated
from six import itervalues
@@ -35,6 +37,8 @@
alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))
+ @deprecated(msg="The gdp.varmover transformation has been deprecated in "
+ "favor of the gdp.reclassify transformation.")
def _apply_to(self, instance, **kwds):
assert not kwds
count = 0
@@ -61,13 +65,19 @@
doc=textwrap.fill(textwrap.dedent(__doc__.strip())))
def _apply_to(self, instance, **kwds):
- assert not kwds
+ assert not kwds # no keywords expected to the transformation
disjunct_generator = instance.component_objects(
- Disjunct, descend_into=(Block, Disjunct))
+ Disjunct, descend_into=(Block, Disjunct),
+ descent_order=TraversalStrategy.PostfixDFS)
for disjunct_component in disjunct_generator:
+ # Check that the disjuncts being reclassified are all relaxed or
+ # are not on an active block.
for disjunct in itervalues(disjunct_component._data):
- if disjunct.active:
- logger.error("""
+ if (disjunct.active and
+ self._disjunct_not_relaxed(disjunct) and
+ self._disjunct_on_active_block(disjunct) and
+ self._disjunct_not_fixed_true(disjunct)):
+ raise GDP_Error("""
Reclassifying active Disjunct "%s" as a Block. This
is generally an error as it indicates that the model
was not completely relaxed before applying the
@@ -91,3 +101,31 @@
Constraint, descend_into=Block, active=True)
for con in cons_in_disjunct:
con.deactivate()
+
+ def _disjunct_not_fixed_true(self, disjunct):
+ # Return true if the disjunct indicator variable is not fixed to True
+ return not (disjunct.indicator_var.fixed and
+ disjunct.indicator_var.value == 1)
+
+ def _disjunct_not_relaxed(self, disjunct):
+ # Return True if the disjunct was not relaxed by a transformation.
+ return not getattr(
+ disjunct, '_gdp_transformation_info', {}).get('relaxed', False)
+
+ def _disjunct_on_active_block(self, disjunct):
+ # Check first to make sure that the disjunct is not a
+ # descendent of an inactive Block or fixed and deactivated
+ # Disjunct, before raising a warning.
+ parent_block = disjunct.parent_block()
+ while parent_block is not None:
+ if parent_block.type() is Block and not parent_block.active:
+ return False
+ elif (parent_block.type() is Disjunct and not parent_block.active
+ and parent_block.indicator_var.value == 0
+ and parent_block.indicator_var.fixed):
+ return False
+ else:
+ # Step up one level in the hierarchy
+ parent_block = parent_block.parent_block()
+ continue
+ return True
| {"golden_diff": "diff --git a/pyomo/gdp/plugins/gdp_var_mover.py b/pyomo/gdp/plugins/gdp_var_mover.py\n--- a/pyomo/gdp/plugins/gdp_var_mover.py\n+++ b/pyomo/gdp/plugins/gdp_var_mover.py\n@@ -18,7 +18,9 @@\n import textwrap\n from pyomo.common.plugin import alias\n from pyomo.core.base import Transformation, Block, Constraint\n-from pyomo.gdp import Disjunct\n+from pyomo.gdp import Disjunct, GDP_Error\n+from pyomo.core import TraversalStrategy\n+from pyomo.common.deprecation import deprecated\n \n from six import itervalues\n \n@@ -35,6 +37,8 @@\n \n alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n \n+ @deprecated(msg=\"The gdp.varmover transformation has been deprecated in \"\n+ \"favor of the gdp.reclassify transformation.\")\n def _apply_to(self, instance, **kwds):\n assert not kwds\n count = 0\n@@ -61,13 +65,19 @@\n doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n \n def _apply_to(self, instance, **kwds):\n- assert not kwds\n+ assert not kwds # no keywords expected to the transformation\n disjunct_generator = instance.component_objects(\n- Disjunct, descend_into=(Block, Disjunct))\n+ Disjunct, descend_into=(Block, Disjunct),\n+ descent_order=TraversalStrategy.PostfixDFS)\n for disjunct_component in disjunct_generator:\n+ # Check that the disjuncts being reclassified are all relaxed or\n+ # are not on an active block.\n for disjunct in itervalues(disjunct_component._data):\n- if disjunct.active:\n- logger.error(\"\"\"\n+ if (disjunct.active and\n+ self._disjunct_not_relaxed(disjunct) and\n+ self._disjunct_on_active_block(disjunct) and\n+ self._disjunct_not_fixed_true(disjunct)):\n+ raise GDP_Error(\"\"\"\n Reclassifying active Disjunct \"%s\" as a Block. This\n is generally an error as it indicates that the model\n was not completely relaxed before applying the\n@@ -91,3 +101,31 @@\n Constraint, descend_into=Block, active=True)\n for con in cons_in_disjunct:\n con.deactivate()\n+\n+ def _disjunct_not_fixed_true(self, disjunct):\n+ # Return true if the disjunct indicator variable is not fixed to True\n+ return not (disjunct.indicator_var.fixed and\n+ disjunct.indicator_var.value == 1)\n+\n+ def _disjunct_not_relaxed(self, disjunct):\n+ # Return True if the disjunct was not relaxed by a transformation.\n+ return not getattr(\n+ disjunct, '_gdp_transformation_info', {}).get('relaxed', False)\n+\n+ def _disjunct_on_active_block(self, disjunct):\n+ # Check first to make sure that the disjunct is not a\n+ # descendent of an inactive Block or fixed and deactivated\n+ # Disjunct, before raising a warning.\n+ parent_block = disjunct.parent_block()\n+ while parent_block is not None:\n+ if parent_block.type() is Block and not parent_block.active:\n+ return False\n+ elif (parent_block.type() is Disjunct and not parent_block.active\n+ and parent_block.indicator_var.value == 0\n+ and parent_block.indicator_var.fixed):\n+ return False\n+ else:\n+ # Step up one level in the hierarchy\n+ parent_block = parent_block.parent_block()\n+ continue\n+ return True\n", "issue": "Appropriate behavior for activate() and deactivate() for nested disjunctions?\n```\r\nfrom pyomo.environ import *\r\nm = ConcreteModel()\r\nm.d1 = Disjunct()\r\nm.d2 = Disjunct()\r\nm.d1.sub1 = Disjunct()\r\nm.d1.sub2 = Disjunct()\r\nm.d1.disj = Disjunction(expr=[m.d1.sub1, m.d1.sub2])\r\nm.disj = Disjunction(expr=[m.d1, m.d2])\r\n```\r\n\r\nWhat should happen when disjuncts containing nested disjunctions are deactivated? 
`m.d1.deactivate()`.\r\nThe reclassifier hack complains about `m.d1.sub1` and `m.d1.sub2` not being expanded.\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"Collection of GDP-related hacks.\n\nHacks for dealing with the fact that solver writers may sometimes fail to\ndetect variables inside of Disjuncts or deactivated Blocks.\n\"\"\"\n\nimport logging\nimport textwrap\nfrom pyomo.common.plugin import alias\nfrom pyomo.core.base import Transformation, Block, Constraint\nfrom pyomo.gdp import Disjunct\n\nfrom six import itervalues\n\nlogger = logging.getLogger('pyomo.gdp')\n\n\nclass HACK_GDP_Var_Mover(Transformation):\n \"\"\"Move indicator vars to top block.\n\n HACK: this will move all indicator variables on the model to the top block\n so the writers can find them.\n\n \"\"\"\n\n alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n\n def _apply_to(self, instance, **kwds):\n assert not kwds\n count = 0\n disjunct_generator = instance.component_data_objects(\n Disjunct, descend_into=(Block, Disjunct))\n for disjunct in disjunct_generator:\n count += 1\n var = disjunct.indicator_var\n var.doc = \"%s(Moved from %s)\" % (\n var.doc + \" \" if var.doc else \"\", var.name, )\n disjunct.del_component(var)\n instance.add_component(\"_gdp_moved_IV_%s\" % (count,), var)\n\n\nclass HACK_GDP_Disjunct_Reclassifier(Transformation):\n \"\"\"Reclassify Disjuncts to Blocks.\n\n HACK: this will reclassify all Disjuncts to Blocks so the current writers\n can find the variables\n\n \"\"\"\n\n alias('gdp.reclassify',\n doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n\n def _apply_to(self, instance, **kwds):\n assert not kwds\n disjunct_generator = instance.component_objects(\n Disjunct, descend_into=(Block, Disjunct))\n for disjunct_component in disjunct_generator:\n for disjunct in itervalues(disjunct_component._data):\n if disjunct.active:\n logger.error(\"\"\"\n Reclassifying active Disjunct \"%s\" as a Block. This\n is generally an error as it indicates that the model\n was not completely relaxed before applying the\n gdp.reclassify transformation\"\"\" % (disjunct.name,))\n\n # Reclassify this disjunct as a block\n disjunct_component.parent_block().reclassify_component_type(\n disjunct_component, Block)\n disjunct_component._activate_without_unfixing_indicator()\n\n # Deactivate all constraints. 
Note that we only need to\n # descend into blocks: we will catch disjuncts in the outer\n # loop.\n #\n # Note that we defer this until AFTER we reactivate the\n # block, as the component_objects generator will not\n # return anything when active=True and the block is\n # deactivated.\n for disjunct in itervalues(disjunct_component._data):\n cons_in_disjunct = disjunct.component_objects(\n Constraint, descend_into=Block, active=True)\n for con in cons_in_disjunct:\n con.deactivate()\n", "path": "pyomo/gdp/plugins/gdp_var_mover.py"}], "after_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n\"\"\"Collection of GDP-related hacks.\n\nHacks for dealing with the fact that solver writers may sometimes fail to\ndetect variables inside of Disjuncts or deactivated Blocks.\n\"\"\"\n\nimport logging\nimport textwrap\nfrom pyomo.common.plugin import alias\nfrom pyomo.core.base import Transformation, Block, Constraint\nfrom pyomo.gdp import Disjunct, GDP_Error\nfrom pyomo.core import TraversalStrategy\nfrom pyomo.common.deprecation import deprecated\n\nfrom six import itervalues\n\nlogger = logging.getLogger('pyomo.gdp')\n\n\nclass HACK_GDP_Var_Mover(Transformation):\n \"\"\"Move indicator vars to top block.\n\n HACK: this will move all indicator variables on the model to the top block\n so the writers can find them.\n\n \"\"\"\n\n alias('gdp.varmover', doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n\n @deprecated(msg=\"The gdp.varmover transformation has been deprecated in \"\n \"favor of the gdp.reclassify transformation.\")\n def _apply_to(self, instance, **kwds):\n assert not kwds\n count = 0\n disjunct_generator = instance.component_data_objects(\n Disjunct, descend_into=(Block, Disjunct))\n for disjunct in disjunct_generator:\n count += 1\n var = disjunct.indicator_var\n var.doc = \"%s(Moved from %s)\" % (\n var.doc + \" \" if var.doc else \"\", var.name, )\n disjunct.del_component(var)\n instance.add_component(\"_gdp_moved_IV_%s\" % (count,), var)\n\n\nclass HACK_GDP_Disjunct_Reclassifier(Transformation):\n \"\"\"Reclassify Disjuncts to Blocks.\n\n HACK: this will reclassify all Disjuncts to Blocks so the current writers\n can find the variables\n\n \"\"\"\n\n alias('gdp.reclassify',\n doc=textwrap.fill(textwrap.dedent(__doc__.strip())))\n\n def _apply_to(self, instance, **kwds):\n assert not kwds # no keywords expected to the transformation\n disjunct_generator = instance.component_objects(\n Disjunct, descend_into=(Block, Disjunct),\n descent_order=TraversalStrategy.PostfixDFS)\n for disjunct_component in disjunct_generator:\n # Check that the disjuncts being reclassified are all relaxed or\n # are not on an active block.\n for disjunct in itervalues(disjunct_component._data):\n if (disjunct.active and\n self._disjunct_not_relaxed(disjunct) and\n self._disjunct_on_active_block(disjunct) and\n self._disjunct_not_fixed_true(disjunct)):\n raise GDP_Error(\"\"\"\n Reclassifying active Disjunct \"%s\" as a Block. 
This\n is generally an error as it indicates that the model\n was not completely relaxed before applying the\n gdp.reclassify transformation\"\"\" % (disjunct.name,))\n\n # Reclassify this disjunct as a block\n disjunct_component.parent_block().reclassify_component_type(\n disjunct_component, Block)\n disjunct_component._activate_without_unfixing_indicator()\n\n # Deactivate all constraints. Note that we only need to\n # descend into blocks: we will catch disjuncts in the outer\n # loop.\n #\n # Note that we defer this until AFTER we reactivate the\n # block, as the component_objects generator will not\n # return anything when active=True and the block is\n # deactivated.\n for disjunct in itervalues(disjunct_component._data):\n cons_in_disjunct = disjunct.component_objects(\n Constraint, descend_into=Block, active=True)\n for con in cons_in_disjunct:\n con.deactivate()\n\n def _disjunct_not_fixed_true(self, disjunct):\n # Return true if the disjunct indicator variable is not fixed to True\n return not (disjunct.indicator_var.fixed and\n disjunct.indicator_var.value == 1)\n\n def _disjunct_not_relaxed(self, disjunct):\n # Return True if the disjunct was not relaxed by a transformation.\n return not getattr(\n disjunct, '_gdp_transformation_info', {}).get('relaxed', False)\n\n def _disjunct_on_active_block(self, disjunct):\n # Check first to make sure that the disjunct is not a\n # descendent of an inactive Block or fixed and deactivated\n # Disjunct, before raising a warning.\n parent_block = disjunct.parent_block()\n while parent_block is not None:\n if parent_block.type() is Block and not parent_block.active:\n return False\n elif (parent_block.type() is Disjunct and not parent_block.active\n and parent_block.indicator_var.value == 0\n and parent_block.indicator_var.fixed):\n return False\n else:\n # Step up one level in the hierarchy\n parent_block = parent_block.parent_block()\n continue\n return True\n", "path": "pyomo/gdp/plugins/gdp_var_mover.py"}]} | 1,401 | 864 |
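The golden diff above handles the nested case from the issue by walking disjuncts in post-order and only raising for a disjunct that is still active, not yet relaxed, not fixed to True, and not sitting under an inactive block or a deactivated, fixed-false parent disjunct. The snippet below is an illustrative regression-test sketch of that behavior, not a test taken from the Pyomo repository; fixing the indicator variables to 0 and 1 after deactivation is an assumption about how a caller marks the unselected disjunct.

```python
# Sketch: the patched gdp.reclassify should no longer complain about the
# never-expanded nested disjuncts once their parent is deactivated and fixed false.
from pyomo.environ import ConcreteModel, TransformationFactory
from pyomo.gdp import Disjunct, Disjunction

m = ConcreteModel()
m.d1 = Disjunct()
m.d2 = Disjunct()
m.d1.sub1 = Disjunct()
m.d1.sub2 = Disjunct()
m.d1.disj = Disjunction(expr=[m.d1.sub1, m.d1.sub2])
m.disj = Disjunction(expr=[m.d1, m.d2])

# Mark d1 as the unselected disjunct: deactivate it and fix its indicator to 0,
# then fix the selected disjunct's indicator to 1.
m.d1.deactivate()
m.d1.indicator_var.fix(0)
m.d2.indicator_var.fix(1)

# With the patch, m.d1.sub1 and m.d1.sub2 are skipped because their parent is a
# deactivated Disjunct whose indicator is fixed to 0, so no GDP_Error is raised.
TransformationFactory('gdp.reclassify').apply_to(m)
```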
gh_patches_debug_14547 | rasdani/github-patches | git_diff | inducer__relate-548 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
After pretend-facility, impersonate disappears from menu
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `course/templatetags/coursetags.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 __copyright__ = "Copyright (C) 2016 Dong Zhuang, Andreas Kloeckner"
4
5 __license__ = """
6 Permission is hereby granted, free of charge, to any person obtaining a copy
7 of this software and associated documentation files (the "Software"), to deal
8 in the Software without restriction, including without limitation the rights
9 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 copies of the Software, and to permit persons to whom the Software is
11 furnished to do so, subject to the following conditions:
12
13 The above copyright notice and this permission notice shall be included in
14 all copies or substantial portions of the Software.
15
16 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 THE SOFTWARE.
23 """
24
25 from django.template import Library, Node, TemplateSyntaxError
26 from django.utils import translation
27
28 register = Library()
29
30
31 # {{{ get language_code in JS traditional naming format
32
33 class GetCurrentLanguageJsFmtNode(Node):
34 def __init__(self, variable):
35 self.variable = variable
36
37 def render(self, context):
38 lang_name = (
39 translation.to_locale(translation.get_language()).replace("_", "-"))
40 context[self.variable] = lang_name
41 return ''
42
43
44 @register.tag("get_current_js_lang_name")
45 def do_get_current_js_lang_name(parser, token):
46 """
47 This will store the current language in the context, in js lang format.
48 This is different with built-in do_get_current_language, which returns
49 languange name like "en-us", "zh-hans". This method return lang name
50 "en-US", "zh-Hans", with the country code capitallized if country code
51 has 2 characters, and capitalize first if country code has more than 2
52 characters.
53
54 Usage::
55
56 {% get_current_language_js_lang_format as language %}
57
58 This will fetch the currently active language name with js tradition and
59 put it's value into the ``language`` context variable.
60 """
61 # token.split_contents() isn't useful here because this tag doesn't
62 # accept variable as arguments
63 args = token.contents.split()
64 if len(args) != 3 or args[1] != 'as':
65 raise TemplateSyntaxError("'get_current_js_lang_name' requires "
66 "'as variable' (got %r)" % args)
67 return GetCurrentLanguageJsFmtNode(args[2])
68
69
70 @register.filter(name='js_lang_fallback')
71 def js_lang_fallback(lang_name, js_name=None):
72 """
73 Return the fallback lang name for js files.
74 :param a :class:`str:`
75 :param js_name: a :class:`str:`, optional.
76 :return: a :class:`str:`
77 """
78
79 # The mapping is crap, we use a special case table to fix it.
80 if js_name == "fullcalendar":
81 known_fallback_mapping = {
82 "zh-hans": "zh-cn",
83 "zh-hant": "zh-tw"}
84 return known_fallback_mapping.get(lang_name.lower(), lang_name).lower()
85
86 return lang_name
87
88 # }}}
89
90
91 # {{{ filter for participation.has_permission()
92
93 @register.filter(name='has_permission')
94 def has_permission(participation, arg):
95 """
96 Check if a participation instance has specific permission.
97 :param participation: a :class:`participation:` instance
98 :param arg: String, with permission and arguments separated by comma
99 :return: a :class:`bool`
100 """
101 has_pperm = False
102 try:
103 arg_list = [s.strip() for s in arg.split(",")]
104 perm = arg_list[0]
105 argument = None
106 if len(arg_list) > 1:
107 argument = arg_list[1]
108 has_pperm = participation.has_permission(perm, argument)
109 except Exception:
110 # fail silently
111 pass
112
113 return has_pperm
114
115 # }}}
116
117
118 @register.filter(name='commit_message_as_html')
119 def commit_message_as_html(commit_sha, repo):
120 from course.versioning import _get_commit_message_as_html
121 return _get_commit_message_as_html(repo, commit_sha)
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/course/templatetags/coursetags.py b/course/templatetags/coursetags.py
--- a/course/templatetags/coursetags.py
+++ b/course/templatetags/coursetags.py
@@ -115,6 +115,28 @@
# }}}
[email protected](name='may_set_fake_time')
+def may_set_fake_time(user):
+ """
+ Check if a user may set fake time.
+ :param user: a :class:`accounts.User:` instance
+ :return: a :class:`bool`
+ """
+ from course.views import may_set_fake_time as msf
+ return msf(user)
+
+
[email protected](name='may_set_pretend_facility')
+def may_set_pretend_facility(user):
+ """
+ Check if a user may set pretend_facility
+ :param user: a :class:`accounts.User:` instance
+ :return: a :class:`bool`
+ """
+ from course.views import may_set_pretend_facility as mspf
+ return mspf(user)
+
+
@register.filter(name='commit_message_as_html')
def commit_message_as_html(commit_sha, repo):
from course.versioning import _get_commit_message_as_html
| {"golden_diff": "diff --git a/course/templatetags/coursetags.py b/course/templatetags/coursetags.py\n--- a/course/templatetags/coursetags.py\n+++ b/course/templatetags/coursetags.py\n@@ -115,6 +115,28 @@\n # }}}\n \n \[email protected](name='may_set_fake_time')\n+def may_set_fake_time(user):\n+ \"\"\"\n+ Check if a user may set fake time.\n+ :param user: a :class:`accounts.User:` instance\n+ :return: a :class:`bool`\n+ \"\"\"\n+ from course.views import may_set_fake_time as msf\n+ return msf(user)\n+\n+\[email protected](name='may_set_pretend_facility')\n+def may_set_pretend_facility(user):\n+ \"\"\"\n+ Check if a user may set pretend_facility\n+ :param user: a :class:`accounts.User:` instance\n+ :return: a :class:`bool`\n+ \"\"\"\n+ from course.views import may_set_pretend_facility as mspf\n+ return mspf(user)\n+\n+\n @register.filter(name='commit_message_as_html')\n def commit_message_as_html(commit_sha, repo):\n from course.versioning import _get_commit_message_as_html\n", "issue": "After pretend-facility, impersonate disappears from menu\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n__copyright__ = \"Copyright (C) 2016 Dong Zhuang, Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom django.template import Library, Node, TemplateSyntaxError\nfrom django.utils import translation\n\nregister = Library()\n\n\n# {{{ get language_code in JS traditional naming format\n\nclass GetCurrentLanguageJsFmtNode(Node):\n def __init__(self, variable):\n self.variable = variable\n\n def render(self, context):\n lang_name = (\n translation.to_locale(translation.get_language()).replace(\"_\", \"-\"))\n context[self.variable] = lang_name\n return ''\n\n\[email protected](\"get_current_js_lang_name\")\ndef do_get_current_js_lang_name(parser, token):\n \"\"\"\n This will store the current language in the context, in js lang format.\n This is different with built-in do_get_current_language, which returns\n languange name like \"en-us\", \"zh-hans\". 
This method return lang name\n \"en-US\", \"zh-Hans\", with the country code capitallized if country code\n has 2 characters, and capitalize first if country code has more than 2\n characters.\n\n Usage::\n\n {% get_current_language_js_lang_format as language %}\n\n This will fetch the currently active language name with js tradition and\n put it's value into the ``language`` context variable.\n \"\"\"\n # token.split_contents() isn't useful here because this tag doesn't\n # accept variable as arguments\n args = token.contents.split()\n if len(args) != 3 or args[1] != 'as':\n raise TemplateSyntaxError(\"'get_current_js_lang_name' requires \"\n \"'as variable' (got %r)\" % args)\n return GetCurrentLanguageJsFmtNode(args[2])\n\n\[email protected](name='js_lang_fallback')\ndef js_lang_fallback(lang_name, js_name=None):\n \"\"\"\n Return the fallback lang name for js files.\n :param a :class:`str:`\n :param js_name: a :class:`str:`, optional.\n :return: a :class:`str:`\n \"\"\"\n\n # The mapping is crap, we use a special case table to fix it.\n if js_name == \"fullcalendar\":\n known_fallback_mapping = {\n \"zh-hans\": \"zh-cn\",\n \"zh-hant\": \"zh-tw\"}\n return known_fallback_mapping.get(lang_name.lower(), lang_name).lower()\n\n return lang_name\n\n# }}}\n\n\n# {{{ filter for participation.has_permission()\n\[email protected](name='has_permission')\ndef has_permission(participation, arg):\n \"\"\"\n Check if a participation instance has specific permission.\n :param participation: a :class:`participation:` instance\n :param arg: String, with permission and arguments separated by comma\n :return: a :class:`bool`\n \"\"\"\n has_pperm = False\n try:\n arg_list = [s.strip() for s in arg.split(\",\")]\n perm = arg_list[0]\n argument = None\n if len(arg_list) > 1:\n argument = arg_list[1]\n has_pperm = participation.has_permission(perm, argument)\n except Exception:\n # fail silently\n pass\n\n return has_pperm\n\n# }}}\n\n\[email protected](name='commit_message_as_html')\ndef commit_message_as_html(commit_sha, repo):\n from course.versioning import _get_commit_message_as_html\n return _get_commit_message_as_html(repo, commit_sha)\n", "path": "course/templatetags/coursetags.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n__copyright__ = \"Copyright (C) 2016 Dong Zhuang, Andreas Kloeckner\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nfrom django.template import Library, Node, TemplateSyntaxError\nfrom django.utils import translation\n\nregister = Library()\n\n\n# {{{ get language_code in JS traditional naming format\n\nclass GetCurrentLanguageJsFmtNode(Node):\n def __init__(self, variable):\n self.variable = variable\n\n def render(self, context):\n lang_name = (\n translation.to_locale(translation.get_language()).replace(\"_\", \"-\"))\n context[self.variable] = lang_name\n return ''\n\n\[email protected](\"get_current_js_lang_name\")\ndef do_get_current_js_lang_name(parser, token):\n \"\"\"\n This will store the current language in the context, in js lang format.\n This is different with built-in do_get_current_language, which returns\n languange name like \"en-us\", \"zh-hans\". This method return lang name\n \"en-US\", \"zh-Hans\", with the country code capitallized if country code\n has 2 characters, and capitalize first if country code has more than 2\n characters.\n\n Usage::\n\n {% get_current_language_js_lang_format as language %}\n\n This will fetch the currently active language name with js tradition and\n put it's value into the ``language`` context variable.\n \"\"\"\n # token.split_contents() isn't useful here because this tag doesn't\n # accept variable as arguments\n args = token.contents.split()\n if len(args) != 3 or args[1] != 'as':\n raise TemplateSyntaxError(\"'get_current_js_lang_name' requires \"\n \"'as variable' (got %r)\" % args)\n return GetCurrentLanguageJsFmtNode(args[2])\n\n\[email protected](name='js_lang_fallback')\ndef js_lang_fallback(lang_name, js_name=None):\n \"\"\"\n Return the fallback lang name for js files.\n :param a :class:`str:`\n :param js_name: a :class:`str:`, optional.\n :return: a :class:`str:`\n \"\"\"\n\n # The mapping is crap, we use a special case table to fix it.\n if js_name == \"fullcalendar\":\n known_fallback_mapping = {\n \"zh-hans\": \"zh-cn\",\n \"zh-hant\": \"zh-tw\"}\n return known_fallback_mapping.get(lang_name.lower(), lang_name).lower()\n\n return lang_name\n\n# }}}\n\n\n# {{{ filter for participation.has_permission()\n\[email protected](name='has_permission')\ndef has_permission(participation, arg):\n \"\"\"\n Check if a participation instance has specific permission.\n :param participation: a :class:`participation:` instance\n :param arg: String, with permission and arguments separated by comma\n :return: a :class:`bool`\n \"\"\"\n has_pperm = False\n try:\n arg_list = [s.strip() for s in arg.split(\",\")]\n perm = arg_list[0]\n argument = None\n if len(arg_list) > 1:\n argument = arg_list[1]\n has_pperm = participation.has_permission(perm, argument)\n except Exception:\n # fail silently\n pass\n\n return has_pperm\n\n# }}}\n\n\[email protected](name='may_set_fake_time')\ndef may_set_fake_time(user):\n \"\"\"\n Check if a user may set fake time.\n :param user: a :class:`accounts.User:` instance\n :return: a :class:`bool`\n \"\"\"\n from course.views import may_set_fake_time as msf\n return msf(user)\n\n\[email protected](name='may_set_pretend_facility')\ndef may_set_pretend_facility(user):\n \"\"\"\n Check if a user may set pretend_facility\n :param user: a :class:`accounts.User:` instance\n :return: a :class:`bool`\n \"\"\"\n from course.views import may_set_pretend_facility as 
mspf\n return mspf(user)\n\n\[email protected](name='commit_message_as_html')\ndef commit_message_as_html(commit_sha, repo):\n from course.versioning import _get_commit_message_as_html\n return _get_commit_message_as_html(repo, commit_sha)\n", "path": "course/templatetags/coursetags.py"}]} | 1,503 | 290 |
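The patch in the row above exposes `may_set_fake_time` and `may_set_pretend_facility` as template filters, so the navigation menu can decide per user whether to render the impersonation and pretend-facility entries. The following is a minimal sketch of how those filters could be exercised directly; it assumes an importable RELATE checkout with Django configured, and the mock-based harness is illustrative rather than the project's actual unit test.

```python
# Sketch: both filters defer to the corresponding helpers in course.views.
from unittest import mock

from course.templatetags.coursetags import may_set_fake_time, may_set_pretend_facility

user = mock.MagicMock()  # stand-in for an accounts.User instance

# The filters import from course.views inside the function body, so the lookup
# happens at call time and patching course.views is enough to isolate them.
with mock.patch("course.views.may_set_fake_time", return_value=True) as msf, \
        mock.patch("course.views.may_set_pretend_facility", return_value=False) as mspf:
    assert may_set_fake_time(user) is True
    assert may_set_pretend_facility(user) is False
    msf.assert_called_once_with(user)
    mspf.assert_called_once_with(user)
```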
gh_patches_debug_30616 | rasdani/github-patches | git_diff | rotki__rotki-897 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changing fiat currency does not change manually tracked balances value column title
## Problem Definition
Seen in v1.4.0 by @cryptomole1
Changing the FIAT currency via the drop-down menu if the user has manually tracked balances changes the value but not the title of the column. The title of the column is stuck to being `USD Value`.
I can confirm the problem, just tested it.
## Task
- Fix it
- Write a unit test
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rotkehlchen/balances/manual.py`
Content:
```
1 from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
2
3 from rotkehlchen.assets.asset import Asset
4 from rotkehlchen.errors import InputError
5 from rotkehlchen.fval import FVal
6 from rotkehlchen.inquirer import Inquirer
7 from rotkehlchen.typing import Location
8
9 if TYPE_CHECKING:
10 from rotkehlchen.db.dbhandler import DBHandler
11
12
13 class ManuallyTrackedBalance(NamedTuple):
14 asset: Asset
15 label: str
16 amount: FVal
17 location: Location
18 tags: Optional[List[str]]
19
20
21 class ManuallyTrackedBalanceWithValue(NamedTuple):
22 # NamedTuples can't use inheritance. Make sure this has same fields as
23 # ManuallyTrackedBalance until usd_value
24 asset: Asset
25 label: str
26 amount: FVal
27 location: Location
28 tags: Optional[List[str]]
29 usd_value: FVal
30
31
32 def get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:
33 """Gets the manually tracked balances
34
35 May raise:
36 - RemoteError if there is a problem querying for the current price of an asset
37 """
38 balances = db.get_manually_tracked_balances()
39 balances_with_value = []
40 for entry in balances:
41 price = Inquirer().find_usd_price(entry.asset)
42 # https://github.com/python/mypy/issues/2582 --> for the type ignore below
43 balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore
44 **entry._asdict(),
45 usd_value=price * entry.amount,
46 ))
47
48 return balances_with_value
49
50
51 def add_manually_tracked_balances(
52 db: 'DBHandler',
53 data: List[ManuallyTrackedBalance],
54 ) -> None:
55 """Adds manually tracked balances
56
57 May raise:
58 - InputError if any of the given balance entry labels already exist in the DB
59 - TagConstraintError if any of the given manually tracked balances contain unknown tags.
60 """
61 if len(data) == 0:
62 raise InputError('Empty list of manually tracked balances to add was given')
63 db.ensure_tags_exist(
64 given_data=data,
65 action='adding',
66 data_type='manually tracked balances',
67 )
68 db.add_manually_tracked_balances(data=data)
69
70
71 def edit_manually_tracked_balances(db: 'DBHandler', data: List[ManuallyTrackedBalance]) -> None:
72 """Edits manually tracked balances
73
74 May raise:
75 - InputError if the given balances list is empty or if
76 any of the balance entry labels to edit do not exist in the DB.
77 - TagConstraintError if any of the given balance data contain unknown tags.
78 """
79 if len(data) == 0:
80 raise InputError('Empty list of manually tracked balances to edit was given')
81 db.ensure_tags_exist(
82 given_data=data,
83 action='editing',
84 data_type='manually tracked balances',
85 )
86 db.edit_manually_tracked_balances(data)
87
88
89 def account_for_manually_tracked_balances(
90 db: 'DBHandler',
91 balances: Dict[str, Any],
92 ) -> Dict[str, Any]:
93 """Given the big balances mapping adds to it all manually tracked balances"""
94 manually_tracked_balances = get_manually_tracked_balances(db)
95 for m_entry in manually_tracked_balances:
96 location_str = str(m_entry.location)
97 if location_str not in balances:
98 balances[location_str] = {}
99 balances[location_str][m_entry.asset.identifier] = {
100 'amount': m_entry.amount,
101 'usd_value': m_entry.usd_value,
102 }
103 else:
104 if m_entry.asset.identifier not in balances[location_str]:
105 balances[location_str][m_entry.asset.identifier] = {
106 'amount': m_entry.amount,
107 'usd_value': m_entry.usd_value,
108 }
109 else:
110 old_amount = balances[location_str][m_entry.asset.identifier]['amount']
111 old_usd_value = balances[location_str][m_entry.asset.identifier]['usd_value']
112 balances[location_str][m_entry.asset.identifier] = {
113 'amount': old_amount + m_entry.amount,
114 'usd_value': old_usd_value + m_entry.usd_value,
115 }
116
117 return balances
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rotkehlchen/balances/manual.py b/rotkehlchen/balances/manual.py
--- a/rotkehlchen/balances/manual.py
+++ b/rotkehlchen/balances/manual.py
@@ -1,10 +1,11 @@
from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional
from rotkehlchen.assets.asset import Asset
-from rotkehlchen.errors import InputError
+from rotkehlchen.constants.misc import ZERO
+from rotkehlchen.errors import InputError, RemoteError
from rotkehlchen.fval import FVal
from rotkehlchen.inquirer import Inquirer
-from rotkehlchen.typing import Location
+from rotkehlchen.typing import Location, Price
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
@@ -30,15 +31,18 @@
def get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:
- """Gets the manually tracked balances
-
- May raise:
- - RemoteError if there is a problem querying for the current price of an asset
- """
+ """Gets the manually tracked balances"""
balances = db.get_manually_tracked_balances()
balances_with_value = []
for entry in balances:
- price = Inquirer().find_usd_price(entry.asset)
+ try:
+ price = Inquirer().find_usd_price(entry.asset)
+ except RemoteError as e:
+ db.msg_aggregator.add_warning(
+ f'Could not find price for {entry.asset.identifier} during '
+ f'manually tracked balance querying due to {str(e)}',
+ )
+ price = Price(ZERO)
# https://github.com/python/mypy/issues/2582 --> for the type ignore below
balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore
**entry._asdict(),
| {"golden_diff": "diff --git a/rotkehlchen/balances/manual.py b/rotkehlchen/balances/manual.py\n--- a/rotkehlchen/balances/manual.py\n+++ b/rotkehlchen/balances/manual.py\n@@ -1,10 +1,11 @@\n from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n \n from rotkehlchen.assets.asset import Asset\n-from rotkehlchen.errors import InputError\n+from rotkehlchen.constants.misc import ZERO\n+from rotkehlchen.errors import InputError, RemoteError\n from rotkehlchen.fval import FVal\n from rotkehlchen.inquirer import Inquirer\n-from rotkehlchen.typing import Location\n+from rotkehlchen.typing import Location, Price\n \n if TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n@@ -30,15 +31,18 @@\n \n \n def get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:\n- \"\"\"Gets the manually tracked balances\n-\n- May raise:\n- - RemoteError if there is a problem querying for the current price of an asset\n- \"\"\"\n+ \"\"\"Gets the manually tracked balances\"\"\"\n balances = db.get_manually_tracked_balances()\n balances_with_value = []\n for entry in balances:\n- price = Inquirer().find_usd_price(entry.asset)\n+ try:\n+ price = Inquirer().find_usd_price(entry.asset)\n+ except RemoteError as e:\n+ db.msg_aggregator.add_warning(\n+ f'Could not find price for {entry.asset.identifier} during '\n+ f'manually tracked balance querying due to {str(e)}',\n+ )\n+ price = Price(ZERO)\n # https://github.com/python/mypy/issues/2582 --> for the type ignore below\n balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore\n **entry._asdict(),\n", "issue": "Changing fiat currency does not change manually tracked balances value column title\n## Problem Definition\r\n\r\nSeen in v1.4.0 by @cryptomole1\r\n\r\nChanging the FIAT currency via the drop-down menu if the user has manually tracked balances changes the value but not the title of the column. The title of the column is stuck to being `USD Value`.\r\n\r\nI can confirm the problem, just tested it.\r\n\r\n## Task\r\n\r\n- Fix it\r\n- Write a unit test\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.errors import InputError\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen.typing import Location\n\nif TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n\n\nclass ManuallyTrackedBalance(NamedTuple):\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n\n\nclass ManuallyTrackedBalanceWithValue(NamedTuple):\n # NamedTuples can't use inheritance. 
Make sure this has same fields as\n # ManuallyTrackedBalance until usd_value\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n usd_value: FVal\n\n\ndef get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:\n \"\"\"Gets the manually tracked balances\n\n May raise:\n - RemoteError if there is a problem querying for the current price of an asset\n \"\"\"\n balances = db.get_manually_tracked_balances()\n balances_with_value = []\n for entry in balances:\n price = Inquirer().find_usd_price(entry.asset)\n # https://github.com/python/mypy/issues/2582 --> for the type ignore below\n balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore\n **entry._asdict(),\n usd_value=price * entry.amount,\n ))\n\n return balances_with_value\n\n\ndef add_manually_tracked_balances(\n db: 'DBHandler',\n data: List[ManuallyTrackedBalance],\n) -> None:\n \"\"\"Adds manually tracked balances\n\n May raise:\n - InputError if any of the given balance entry labels already exist in the DB\n - TagConstraintError if any of the given manually tracked balances contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to add was given')\n db.ensure_tags_exist(\n given_data=data,\n action='adding',\n data_type='manually tracked balances',\n )\n db.add_manually_tracked_balances(data=data)\n\n\ndef edit_manually_tracked_balances(db: 'DBHandler', data: List[ManuallyTrackedBalance]) -> None:\n \"\"\"Edits manually tracked balances\n\n May raise:\n - InputError if the given balances list is empty or if\n any of the balance entry labels to edit do not exist in the DB.\n - TagConstraintError if any of the given balance data contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to edit was given')\n db.ensure_tags_exist(\n given_data=data,\n action='editing',\n data_type='manually tracked balances',\n )\n db.edit_manually_tracked_balances(data)\n\n\ndef account_for_manually_tracked_balances(\n db: 'DBHandler',\n balances: Dict[str, Any],\n) -> Dict[str, Any]:\n \"\"\"Given the big balances mapping adds to it all manually tracked balances\"\"\"\n manually_tracked_balances = get_manually_tracked_balances(db)\n for m_entry in manually_tracked_balances:\n location_str = str(m_entry.location)\n if location_str not in balances:\n balances[location_str] = {}\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n if m_entry.asset.identifier not in balances[location_str]:\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n old_amount = balances[location_str][m_entry.asset.identifier]['amount']\n old_usd_value = balances[location_str][m_entry.asset.identifier]['usd_value']\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': old_amount + m_entry.amount,\n 'usd_value': old_usd_value + m_entry.usd_value,\n }\n\n return balances\n", "path": "rotkehlchen/balances/manual.py"}], "after_files": [{"content": "from typing import TYPE_CHECKING, Any, Dict, List, NamedTuple, Optional\n\nfrom rotkehlchen.assets.asset import Asset\nfrom rotkehlchen.constants.misc import ZERO\nfrom rotkehlchen.errors import InputError, RemoteError\nfrom rotkehlchen.fval import FVal\nfrom rotkehlchen.inquirer import Inquirer\nfrom rotkehlchen.typing import Location, Price\n\nif TYPE_CHECKING:\n from 
rotkehlchen.db.dbhandler import DBHandler\n\n\nclass ManuallyTrackedBalance(NamedTuple):\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n\n\nclass ManuallyTrackedBalanceWithValue(NamedTuple):\n # NamedTuples can't use inheritance. Make sure this has same fields as\n # ManuallyTrackedBalance until usd_value\n asset: Asset\n label: str\n amount: FVal\n location: Location\n tags: Optional[List[str]]\n usd_value: FVal\n\n\ndef get_manually_tracked_balances(db: 'DBHandler') -> List[ManuallyTrackedBalanceWithValue]:\n \"\"\"Gets the manually tracked balances\"\"\"\n balances = db.get_manually_tracked_balances()\n balances_with_value = []\n for entry in balances:\n try:\n price = Inquirer().find_usd_price(entry.asset)\n except RemoteError as e:\n db.msg_aggregator.add_warning(\n f'Could not find price for {entry.asset.identifier} during '\n f'manually tracked balance querying due to {str(e)}',\n )\n price = Price(ZERO)\n # https://github.com/python/mypy/issues/2582 --> for the type ignore below\n balances_with_value.append(ManuallyTrackedBalanceWithValue( # type: ignore\n **entry._asdict(),\n usd_value=price * entry.amount,\n ))\n\n return balances_with_value\n\n\ndef add_manually_tracked_balances(\n db: 'DBHandler',\n data: List[ManuallyTrackedBalance],\n) -> None:\n \"\"\"Adds manually tracked balances\n\n May raise:\n - InputError if any of the given balance entry labels already exist in the DB\n - TagConstraintError if any of the given manually tracked balances contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to add was given')\n db.ensure_tags_exist(\n given_data=data,\n action='adding',\n data_type='manually tracked balances',\n )\n db.add_manually_tracked_balances(data=data)\n\n\ndef edit_manually_tracked_balances(db: 'DBHandler', data: List[ManuallyTrackedBalance]) -> None:\n \"\"\"Edits manually tracked balances\n\n May raise:\n - InputError if the given balances list is empty or if\n any of the balance entry labels to edit do not exist in the DB.\n - TagConstraintError if any of the given balance data contain unknown tags.\n \"\"\"\n if len(data) == 0:\n raise InputError('Empty list of manually tracked balances to edit was given')\n db.ensure_tags_exist(\n given_data=data,\n action='editing',\n data_type='manually tracked balances',\n )\n db.edit_manually_tracked_balances(data)\n\n\ndef account_for_manually_tracked_balances(\n db: 'DBHandler',\n balances: Dict[str, Any],\n) -> Dict[str, Any]:\n \"\"\"Given the big balances mapping adds to it all manually tracked balances\"\"\"\n manually_tracked_balances = get_manually_tracked_balances(db)\n for m_entry in manually_tracked_balances:\n location_str = str(m_entry.location)\n if location_str not in balances:\n balances[location_str] = {}\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n if m_entry.asset.identifier not in balances[location_str]:\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': m_entry.amount,\n 'usd_value': m_entry.usd_value,\n }\n else:\n old_amount = balances[location_str][m_entry.asset.identifier]['amount']\n old_usd_value = balances[location_str][m_entry.asset.identifier]['usd_value']\n balances[location_str][m_entry.asset.identifier] = {\n 'amount': old_amount + m_entry.amount,\n 'usd_value': old_usd_value + m_entry.usd_value,\n }\n\n return balances\n", "path": "rotkehlchen/balances/manual.py"}]} | 1,551 | 439 |
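The golden diff in this row changes `get_manually_tracked_balances` so that a failed price lookup no longer propagates `RemoteError`; it records a warning on the DB handler's message aggregator and falls back to a zero USD value. A test sketch for that fallback is shown below; the mocked `DBHandler`, the `A_BTC` constant and the patch target are assumptions modeled on rotki's conventions, not code taken from the row itself.

```python
# Sketch: a failed price query should produce usd_value == 0 plus a warning.
from unittest import mock

from rotkehlchen.balances.manual import (
    ManuallyTrackedBalance,
    get_manually_tracked_balances,
)
from rotkehlchen.constants.assets import A_BTC
from rotkehlchen.constants.misc import ZERO
from rotkehlchen.errors import RemoteError
from rotkehlchen.fval import FVal
from rotkehlchen.typing import Location

db = mock.MagicMock()  # stand-in for a DBHandler carrying a msg_aggregator
db.get_manually_tracked_balances.return_value = [
    ManuallyTrackedBalance(
        asset=A_BTC,
        label='cold storage',
        amount=FVal('0.5'),
        location=Location.EXTERNAL,
        tags=None,
    ),
]

with mock.patch('rotkehlchen.balances.manual.Inquirer') as inquirer:
    inquirer.return_value.find_usd_price.side_effect = RemoteError('oracle unreachable')
    result = get_manually_tracked_balances(db)

assert result[0].usd_value == ZERO
db.msg_aggregator.add_warning.assert_called_once()
```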
gh_patches_debug_29682 | rasdani/github-patches | git_diff | GeotrekCE__Geotrek-admin-990 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add requirement about projection units : should be meters
http://gis.stackexchange.com/questions/85955/how-can-i-obtain-the-unit-of-a-projection-srid
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/common/__init__.py`
Content:
```
1 """
2
3 Geotrek startup script.
4
5 This is executed only once at startup.
6
7 """
8 from south.signals import post_migrate
9 from django.conf import settings
10 from django.db.models.signals import post_syncdb
11
12 from mapentity.helpers import api_bbox
13
14 from geotrek.common.utils.postgresql import load_sql_files
15
16
17 """
18 http://djangosnippets.org/snippets/2311/
19 Ensure South will update our custom SQL during a call to `migrate`.
20 """
21
22 def run_initial_sql_post_migrate(sender, **kwargs):
23 app_label = kwargs.get('app')
24 load_sql_files(app_label)
25
26
27 def run_initial_sql_post_syncdb(sender, **kwargs):
28 app = kwargs.get('app')
29 models_module = app.__name__
30 app_label = models_module.rsplit('.')[-2]
31 load_sql_files(app_label)
32
33
34 if settings.TEST and not settings.SOUTH_TESTS_MIGRATE:
35 post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid="geotrek.core.sqlautoload")
36 # During tests, the signal is received twice unfortunately
37 # https://code.djangoproject.com/ticket/17977
38 else:
39 post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid="geotrek.core.sqlautoload")
40
41
42 """
43 Computed client-side setting.
44 """
45 settings.LEAFLET_CONFIG['SPATIAL_EXTENT'] = api_bbox(settings.SPATIAL_EXTENT, buffer=0.5)
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geotrek/common/__init__.py b/geotrek/common/__init__.py
--- a/geotrek/common/__init__.py
+++ b/geotrek/common/__init__.py
@@ -5,9 +5,11 @@
This is executed only once at startup.
"""
-from south.signals import post_migrate
+from south.signals import pre_migrate, post_migrate
from django.conf import settings
-from django.db.models.signals import post_syncdb
+from django.db import connection
+from django.db.models.signals import pre_syncdb, post_syncdb
+from django.core.exceptions import ImproperlyConfigured
from mapentity.helpers import api_bbox
@@ -31,11 +33,26 @@
load_sql_files(app_label)
+def check_srid_has_meter_unit(sender, **kwargs):
+ if not hasattr(check_srid_has_meter_unit, '_checked'):
+ cursor = connection.cursor()
+ cursor.execute("""
+ SELECT * FROM spatial_ref_sys
+ WHERE srtext ILIKE '%%meter%%' AND srid=%s;""", [settings.SRID])
+ results = cursor.fetchall()
+ if len(results) == 0:
+ err_msg = 'Unit of SRID EPSG:%s is not meter.' % settings.SRID
+ raise ImproperlyConfigured(err_msg)
+ check_srid_has_meter_unit._checked = True
+
+
if settings.TEST and not settings.SOUTH_TESTS_MIGRATE:
+ pre_syncdb.connect(check_srid_has_meter_unit, dispatch_uid="geotrek.core.checksrid")
post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid="geotrek.core.sqlautoload")
# During tests, the signal is received twice unfortunately
# https://code.djangoproject.com/ticket/17977
else:
+ pre_migrate.connect(check_srid_has_meter_unit, dispatch_uid="geotrek.core.checksrid")
post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid="geotrek.core.sqlautoload")
| {"golden_diff": "diff --git a/geotrek/common/__init__.py b/geotrek/common/__init__.py\n--- a/geotrek/common/__init__.py\n+++ b/geotrek/common/__init__.py\n@@ -5,9 +5,11 @@\n This is executed only once at startup.\n \n \"\"\"\n-from south.signals import post_migrate\n+from south.signals import pre_migrate, post_migrate\n from django.conf import settings\n-from django.db.models.signals import post_syncdb\n+from django.db import connection\n+from django.db.models.signals import pre_syncdb, post_syncdb\n+from django.core.exceptions import ImproperlyConfigured\n \n from mapentity.helpers import api_bbox\n \n@@ -31,11 +33,26 @@\n load_sql_files(app_label)\n \n \n+def check_srid_has_meter_unit(sender, **kwargs):\n+ if not hasattr(check_srid_has_meter_unit, '_checked'):\n+ cursor = connection.cursor()\n+ cursor.execute(\"\"\"\n+ SELECT * FROM spatial_ref_sys\n+ WHERE srtext ILIKE '%%meter%%' AND srid=%s;\"\"\", [settings.SRID])\n+ results = cursor.fetchall()\n+ if len(results) == 0:\n+ err_msg = 'Unit of SRID EPSG:%s is not meter.' % settings.SRID\n+ raise ImproperlyConfigured(err_msg)\n+ check_srid_has_meter_unit._checked = True\n+\n+\n if settings.TEST and not settings.SOUTH_TESTS_MIGRATE:\n+ pre_syncdb.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid=\"geotrek.core.sqlautoload\")\n # During tests, the signal is received twice unfortunately\n # https://code.djangoproject.com/ticket/17977\n else:\n+ pre_migrate.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid=\"geotrek.core.sqlautoload\")\n", "issue": "Add requirement about projection units : should be meters\nhttp://gis.stackexchange.com/questions/85955/how-can-i-obtain-the-unit-of-a-projection-srid\n\n", "before_files": [{"content": "\"\"\"\n\n Geotrek startup script.\n\n This is executed only once at startup.\n\n\"\"\"\nfrom south.signals import post_migrate\nfrom django.conf import settings\nfrom django.db.models.signals import post_syncdb\n\nfrom mapentity.helpers import api_bbox\n\nfrom geotrek.common.utils.postgresql import load_sql_files\n\n\n\"\"\"\n http://djangosnippets.org/snippets/2311/\n Ensure South will update our custom SQL during a call to `migrate`.\n\"\"\"\n\ndef run_initial_sql_post_migrate(sender, **kwargs):\n app_label = kwargs.get('app')\n load_sql_files(app_label)\n\n\ndef run_initial_sql_post_syncdb(sender, **kwargs):\n app = kwargs.get('app')\n models_module = app.__name__\n app_label = models_module.rsplit('.')[-2]\n load_sql_files(app_label)\n\n\nif settings.TEST and not settings.SOUTH_TESTS_MIGRATE:\n post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid=\"geotrek.core.sqlautoload\")\n # During tests, the signal is received twice unfortunately\n # https://code.djangoproject.com/ticket/17977\nelse:\n post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid=\"geotrek.core.sqlautoload\")\n\n\n\"\"\"\n Computed client-side setting.\n\"\"\"\nsettings.LEAFLET_CONFIG['SPATIAL_EXTENT'] = api_bbox(settings.SPATIAL_EXTENT, buffer=0.5)\n", "path": "geotrek/common/__init__.py"}], "after_files": [{"content": "\"\"\"\n\n Geotrek startup script.\n\n This is executed only once at startup.\n\n\"\"\"\nfrom south.signals import pre_migrate, post_migrate\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.db.models.signals import pre_syncdb, post_syncdb\nfrom django.core.exceptions import 
ImproperlyConfigured\n\nfrom mapentity.helpers import api_bbox\n\nfrom geotrek.common.utils.postgresql import load_sql_files\n\n\n\"\"\"\n http://djangosnippets.org/snippets/2311/\n Ensure South will update our custom SQL during a call to `migrate`.\n\"\"\"\n\ndef run_initial_sql_post_migrate(sender, **kwargs):\n app_label = kwargs.get('app')\n load_sql_files(app_label)\n\n\ndef run_initial_sql_post_syncdb(sender, **kwargs):\n app = kwargs.get('app')\n models_module = app.__name__\n app_label = models_module.rsplit('.')[-2]\n load_sql_files(app_label)\n\n\ndef check_srid_has_meter_unit(sender, **kwargs):\n if not hasattr(check_srid_has_meter_unit, '_checked'):\n cursor = connection.cursor()\n cursor.execute(\"\"\"\n SELECT * FROM spatial_ref_sys\n WHERE srtext ILIKE '%%meter%%' AND srid=%s;\"\"\", [settings.SRID])\n results = cursor.fetchall()\n if len(results) == 0:\n err_msg = 'Unit of SRID EPSG:%s is not meter.' % settings.SRID\n raise ImproperlyConfigured(err_msg)\n check_srid_has_meter_unit._checked = True\n\n\nif settings.TEST and not settings.SOUTH_TESTS_MIGRATE:\n pre_syncdb.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_syncdb.connect(run_initial_sql_post_syncdb, dispatch_uid=\"geotrek.core.sqlautoload\")\n # During tests, the signal is received twice unfortunately\n # https://code.djangoproject.com/ticket/17977\nelse:\n pre_migrate.connect(check_srid_has_meter_unit, dispatch_uid=\"geotrek.core.checksrid\")\n post_migrate.connect(run_initial_sql_post_migrate, dispatch_uid=\"geotrek.core.sqlautoload\")\n\n\n\"\"\"\n Computed client-side setting.\n\"\"\"\nsettings.LEAFLET_CONFIG['SPATIAL_EXTENT'] = api_bbox(settings.SPATIAL_EXTENT, buffer=0.5)\n", "path": "geotrek/common/__init__.py"}]} | 693 | 442 |
gh_patches_debug_47980 | rasdani/github-patches | git_diff | TheAlgorithms__Python-564 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unnecessary Loop
https://github.com/TheAlgorithms/Python/blob/4e0184a41dd3e4838da484057d25e17234353da0/dynamic_programming/matrix_chain_order.py#L12-L15
`Line 12` creates an NxN matrix that already **contains 0** in every cell, so there is no need for `Line 14-15`.
--- END ISSUE ---
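As a quick check of the claim above, here is a minimal, self-contained sketch (illustrative only, not taken from the repository) showing that the comprehension from `Line 12` already yields an all-zero matrix, so the diagonal-zeroing loop is a no-op:
```python
# Standalone sketch: the nested comprehension already fills every cell with 0,
# so a follow-up loop setting Matrix[i][i] = 0 writes values that are already there.
N = 5
Matrix = [[0 for x in range(N)] for x in range(N)]  # same construction as Line 12

assert all(Matrix[i][j] == 0 for i in range(N) for j in range(N))

for i in range(1, N):      # the redundant loop from Lines 14-15
    Matrix[i][i] = 0
assert all(Matrix[i][i] == 0 for i in range(N))
```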
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dynamic_programming/matrix_chain_order.py`
Content:
```
1 from __future__ import print_function
2
3 import sys
4 '''
5 Dynamic Programming
6 Implementation of Matrix Chain Multiplication
7 Time Complexity: O(n^3)
8 Space Complexity: O(n^2)
9 '''
10 def MatrixChainOrder(array):
11 N=len(array)
12 Matrix=[[0 for x in range(N)] for x in range(N)]
13 Sol=[[0 for x in range(N)] for x in range(N)]
14 for i in range(1,N):
15 Matrix[i][i]=0
16
17 for ChainLength in range(2,N):
18 for a in range(1,N-ChainLength+1):
19 b = a+ChainLength-1
20
21 Matrix[a][b] = sys.maxsize
22 for c in range(a , b):
23 cost = Matrix[a][c] + Matrix[c+1][b] + array[a-1]*array[c]*array[b]
24 if cost < Matrix[a][b]:
25 Matrix[a][b] = cost
26 Sol[a][b] = c
27 return Matrix , Sol
28 #Print order of matrix with Ai as Matrix
29 def PrintOptimalSolution(OptimalSolution,i,j):
30 if i==j:
31 print("A" + str(i),end = " ")
32 else:
33 print("(",end = " ")
34 PrintOptimalSolution(OptimalSolution,i,OptimalSolution[i][j])
35 PrintOptimalSolution(OptimalSolution,OptimalSolution[i][j]+1,j)
36 print(")",end = " ")
37
38 def main():
39 array=[30,35,15,5,10,20,25]
40 n=len(array)
41 #Size of matrix created from above array will be
42 # 30*35 35*15 15*5 5*10 10*20 20*25
43 Matrix , OptimalSolution = MatrixChainOrder(array)
44
45 print("No. of Operation required: "+str((Matrix[1][n-1])))
46 PrintOptimalSolution(OptimalSolution,1,n-1)
47 if __name__ == '__main__':
48 main()
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py
--- a/dynamic_programming/matrix_chain_order.py
+++ b/dynamic_programming/matrix_chain_order.py
@@ -11,8 +11,6 @@
N=len(array)
Matrix=[[0 for x in range(N)] for x in range(N)]
Sol=[[0 for x in range(N)] for x in range(N)]
- for i in range(1,N):
- Matrix[i][i]=0
for ChainLength in range(2,N):
for a in range(1,N-ChainLength+1):
| {"golden_diff": "diff --git a/dynamic_programming/matrix_chain_order.py b/dynamic_programming/matrix_chain_order.py\n--- a/dynamic_programming/matrix_chain_order.py\n+++ b/dynamic_programming/matrix_chain_order.py\n@@ -11,8 +11,6 @@\n N=len(array)\n Matrix=[[0 for x in range(N)] for x in range(N)]\n Sol=[[0 for x in range(N)] for x in range(N)]\n- for i in range(1,N):\n- Matrix[i][i]=0\n \n for ChainLength in range(2,N):\n for a in range(1,N-ChainLength+1):\n", "issue": "Unnecessary Loop \nhttps://github.com/TheAlgorithms/Python/blob/4e0184a41dd3e4838da484057d25e17234353da0/dynamic_programming/matrix_chain_order.py#L12-L15\r\n\r\n`Line 12` creates a NxN Matrix that **contains 0**. So there is no need for `Line 14-15`\n", "before_files": [{"content": "from __future__ import print_function\n\nimport sys\n'''\nDynamic Programming\nImplementation of Matrix Chain Multiplication\nTime Complexity: O(n^3)\nSpace Complexity: O(n^2)\n'''\ndef MatrixChainOrder(array):\n N=len(array)\n Matrix=[[0 for x in range(N)] for x in range(N)]\n Sol=[[0 for x in range(N)] for x in range(N)]\n for i in range(1,N):\n Matrix[i][i]=0\n\n for ChainLength in range(2,N):\n for a in range(1,N-ChainLength+1):\n b = a+ChainLength-1\n\n Matrix[a][b] = sys.maxsize\n for c in range(a , b):\n cost = Matrix[a][c] + Matrix[c+1][b] + array[a-1]*array[c]*array[b]\n if cost < Matrix[a][b]:\n Matrix[a][b] = cost\n Sol[a][b] = c\n return Matrix , Sol\n#Print order of matrix with Ai as Matrix\ndef PrintOptimalSolution(OptimalSolution,i,j):\n if i==j:\n print(\"A\" + str(i),end = \" \")\n else:\n print(\"(\",end = \" \")\n PrintOptimalSolution(OptimalSolution,i,OptimalSolution[i][j])\n PrintOptimalSolution(OptimalSolution,OptimalSolution[i][j]+1,j)\n print(\")\",end = \" \")\n\ndef main():\n array=[30,35,15,5,10,20,25]\n n=len(array)\n #Size of matrix created from above array will be\n # 30*35 35*15 15*5 5*10 10*20 20*25\n Matrix , OptimalSolution = MatrixChainOrder(array)\n\n print(\"No. of Operation required: \"+str((Matrix[1][n-1])))\n PrintOptimalSolution(OptimalSolution,1,n-1)\nif __name__ == '__main__':\n main()\n", "path": "dynamic_programming/matrix_chain_order.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport sys\n'''\nDynamic Programming\nImplementation of Matrix Chain Multiplication\nTime Complexity: O(n^3)\nSpace Complexity: O(n^2)\n'''\ndef MatrixChainOrder(array):\n N=len(array)\n Matrix=[[0 for x in range(N)] for x in range(N)]\n Sol=[[0 for x in range(N)] for x in range(N)]\n\n for ChainLength in range(2,N):\n for a in range(1,N-ChainLength+1):\n b = a+ChainLength-1\n\n Matrix[a][b] = sys.maxsize\n for c in range(a , b):\n cost = Matrix[a][c] + Matrix[c+1][b] + array[a-1]*array[c]*array[b]\n if cost < Matrix[a][b]:\n Matrix[a][b] = cost\n Sol[a][b] = c\n return Matrix , Sol\n#Print order of matrix with Ai as Matrix\ndef PrintOptimalSolution(OptimalSolution,i,j):\n if i==j:\n print(\"A\" + str(i),end = \" \")\n else:\n print(\"(\",end = \" \")\n PrintOptimalSolution(OptimalSolution,i,OptimalSolution[i][j])\n PrintOptimalSolution(OptimalSolution,OptimalSolution[i][j]+1,j)\n print(\")\",end = \" \")\n\ndef main():\n array=[30,35,15,5,10,20,25]\n n=len(array)\n #Size of matrix created from above array will be\n # 30*35 35*15 15*5 5*10 10*20 20*25\n Matrix , OptimalSolution = MatrixChainOrder(array)\n\n print(\"No. 
of Operation required: \"+str((Matrix[1][n-1])))\n PrintOptimalSolution(OptimalSolution,1,n-1)\nif __name__ == '__main__':\n main()\n", "path": "dynamic_programming/matrix_chain_order.py"}]} | 908 | 140 |
gh_patches_debug_3449 | rasdani/github-patches | git_diff | jazzband__pip-tools-608 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
README broken on PyPI (must be reStructuredText)
The [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use MarkDown.
Solution A: Convert to reST
---------------------
1. Rename the current `README.md` to `README.rst`
1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst))
1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50))
Solution B: Process before Upload
-------------------
1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14))
1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49))
------------
Both solutions above will render a nicely formatted, HTML-styled package description on PyPI.
Quality Assurance
--------------
Optionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """
2 pip-tools keeps your pinned dependencies fresh.
3 """
4 from os.path import abspath, dirname, join
5 from setuptools import find_packages, setup
6
7 def read_file(filename):
8 """Read the contents of a file located relative to setup.py"""
9 with open(join(abspath(dirname(__file__)), filename)) as thefile:
10 return thefile.read()
11
12 setup(
13 name='pip-tools',
14 use_scm_version=True,
15 url='https://github.com/jazzband/pip-tools/',
16 license='BSD',
17 author='Vincent Driessen',
18 author_email='[email protected]',
19 description=__doc__,
20 long_description=read_file('README.rst'),
21 packages=find_packages(exclude=['tests']),
22 setup_requires=['setuptools_scm'],
23 install_requires=[
24 'click>=6',
25 'first',
26 'six',
27 'setuptools'
28 ],
29 zip_safe=False,
30 entry_points={
31 'console_scripts': [
32 'pip-compile = piptools.scripts.compile:cli',
33 'pip-sync = piptools.scripts.sync:cli',
34 ],
35 },
36 platforms='any',
37 classifiers=[
38 'Development Status :: 5 - Production/Stable',
39 'Intended Audience :: Developers',
40 'Intended Audience :: System Administrators',
41 'License :: OSI Approved :: BSD License',
42 'Operating System :: OS Independent',
43 'Programming Language :: Python',
44 'Programming Language :: Python :: 2',
45 'Programming Language :: Python :: 2.7',
46 'Programming Language :: Python :: 3',
47 'Programming Language :: Python :: 3.4',
48 'Programming Language :: Python :: 3.5',
49 'Programming Language :: Python :: 3.6',
50 'Topic :: System :: Systems Administration',
51 ]
52 )
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@
license='BSD',
author='Vincent Driessen',
author_email='[email protected]',
- description=__doc__,
+ description=__doc__.strip(),
long_description=read_file('README.rst'),
packages=find_packages(exclude=['tests']),
setup_requires=['setuptools_scm'],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -16,7 +16,7 @@\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n- description=__doc__,\n+ description=__doc__.strip(),\n long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n", "issue": "README broken on PyPI (must be reStructuredText)\nThe [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use MarkDown.\r\n\r\nSolution A: Convert to reST\r\n---------------------\r\n\r\n1. Rename the current `README.md` to `README.rst`\r\n1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst))\r\n1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50))\r\n\r\nSolution B: Process before Upload\r\n-------------------\r\n\r\n1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14))\r\n1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49))\r\n\r\n------------\r\n\r\nBoth solutions above will render a nicely formatted, HTML-styled package description on PyPI.\r\n\r\nQuality Assurance\r\n--------------\r\n\r\nOptionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description.\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nfrom os.path import abspath, dirname, join\nfrom setuptools import find_packages, setup\n\ndef read_file(filename):\n \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n with open(join(abspath(dirname(__file__)), filename)) as thefile:\n return thefile.read()\n\nsetup(\n name='pip-tools',\n use_scm_version=True,\n url='https://github.com/jazzband/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n install_requires=[\n 'click>=6',\n 'first',\n 'six',\n 'setuptools'\n ],\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'pip-compile = piptools.scripts.compile:cli',\n 'pip-sync = piptools.scripts.sync:cli',\n ],\n },\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\npip-tools 
keeps your pinned dependencies fresh.\n\"\"\"\nfrom os.path import abspath, dirname, join\nfrom setuptools import find_packages, setup\n\ndef read_file(filename):\n \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n with open(join(abspath(dirname(__file__)), filename)) as thefile:\n return thefile.read()\n\nsetup(\n name='pip-tools',\n use_scm_version=True,\n url='https://github.com/jazzband/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__.strip(),\n long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n install_requires=[\n 'click>=6',\n 'first',\n 'six',\n 'setuptools'\n ],\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'pip-compile = piptools.scripts.compile:cli',\n 'pip-sync = piptools.scripts.sync:cli',\n ],\n },\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}]} | 1,116 | 98 |
gh_patches_debug_7898 | rasdani/github-patches | git_diff | googleapis__python-bigquery-1889 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Extend the OTEL traces to track bytes billed and bytes processed
Extend OTEL traces to track bytes billed and bytes processed. Allowing developers to see how much data and cost was consumed as part of BigQueryJob trace
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `google/cloud/bigquery/opentelemetry_tracing.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16 from contextlib import contextmanager
17 from google.api_core.exceptions import GoogleAPICallError # type: ignore
18
19 logger = logging.getLogger(__name__)
20 try:
21 from opentelemetry import trace # type: ignore
22 from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore
23 from opentelemetry.trace.status import Status # type: ignore
24
25 HAS_OPENTELEMETRY = True
26 _warned_telemetry = True
27
28 except ImportError:
29 HAS_OPENTELEMETRY = False
30 _warned_telemetry = False
31
32 _default_attributes = {
33 "db.system": "BigQuery"
34 } # static, default values assigned to all spans
35
36
37 @contextmanager
38 def create_span(name, attributes=None, client=None, job_ref=None):
39 """Creates a ContextManager for a Span to be exported to the configured exporter.
40 If no configuration exists yields None.
41
42 Args:
43 name (str): Name that will be set for the span being created
44 attributes (Optional[dict]):
45 Additional attributes that pertain to
46 the specific API call (i.e. not a default attribute)
47 client (Optional[google.cloud.bigquery.client.Client]):
48 Pass in a Client object to extract any attributes that may be
49 relevant to it and add them to the created spans.
50 job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
51 Pass in a _AsyncJob object to extract any attributes that may be
52 relevant to it and add them to the created spans.
53
54 Yields:
55 opentelemetry.trace.Span: Yields the newly created Span.
56
57 Raises:
58 google.api_core.exceptions.GoogleAPICallError:
59 Raised if a span could not be yielded or issue with call to
60 OpenTelemetry.
61 """
62 global _warned_telemetry
63 final_attributes = _get_final_span_attributes(attributes, client, job_ref)
64 if not HAS_OPENTELEMETRY:
65 if not _warned_telemetry:
66 logger.debug(
67 "This service is instrumented using OpenTelemetry. "
68 "OpenTelemetry or one of its components could not be imported; "
69 "please add compatible versions of opentelemetry-api and "
70 "opentelemetry-instrumentation packages in order to get BigQuery "
71 "Tracing data."
72 )
73 _warned_telemetry = True
74
75 yield None
76 return
77 tracer = trace.get_tracer(__name__)
78
79 # yield new span value
80 with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:
81 try:
82 yield span
83 except GoogleAPICallError as error:
84 if error.code is not None:
85 span.set_status(Status(http_status_to_status_code(error.code)))
86 raise
87
88
89 def _get_final_span_attributes(attributes=None, client=None, job_ref=None):
90 """Compiles attributes from: client, job_ref, user-provided attributes.
91
92 Attributes from all of these sources are merged together. Note the
93 attributes are added sequentially based on perceived order of precedence:
94 i.e. attributes added last may overwrite attributes added earlier.
95
96 Args:
97 attributes (Optional[dict]):
98 Additional attributes that pertain to
99 the specific API call (i.e. not a default attribute)
100
101 client (Optional[google.cloud.bigquery.client.Client]):
102 Pass in a Client object to extract any attributes that may be
103 relevant to it and add them to the final_attributes
104
105 job_ref (Optional[google.cloud.bigquery.job._AsyncJob])
106 Pass in a _AsyncJob object to extract any attributes that may be
107 relevant to it and add them to the final_attributes.
108
109 Returns: dict
110 """
111
112 collected_attributes = _default_attributes.copy()
113
114 if client:
115 collected_attributes.update(_set_client_attributes(client))
116 if job_ref:
117 collected_attributes.update(_set_job_attributes(job_ref))
118 if attributes:
119 collected_attributes.update(attributes)
120
121 final_attributes = {k: v for k, v in collected_attributes.items() if v is not None}
122 return final_attributes
123
124
125 def _set_client_attributes(client):
126 return {"db.name": client.project, "location": client.location}
127
128
129 def _set_job_attributes(job_ref):
130 job_attributes = {
131 "db.name": job_ref.project,
132 "job_id": job_ref.job_id,
133 "state": job_ref.state,
134 }
135
136 job_attributes["hasErrors"] = job_ref.error_result is not None
137
138 if job_ref.created is not None:
139 job_attributes["timeCreated"] = job_ref.created.isoformat()
140
141 if job_ref.started is not None:
142 job_attributes["timeStarted"] = job_ref.started.isoformat()
143
144 if job_ref.ended is not None:
145 job_attributes["timeEnded"] = job_ref.ended.isoformat()
146
147 if job_ref.location is not None:
148 job_attributes["location"] = job_ref.location
149
150 if job_ref.parent_job_id is not None:
151 job_attributes["parent_job_id"] = job_ref.parent_job_id
152
153 if job_ref.num_child_jobs is not None:
154 job_attributes["num_child_jobs"] = job_ref.num_child_jobs
155
156 return job_attributes
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py
--- a/google/cloud/bigquery/opentelemetry_tracing.py
+++ b/google/cloud/bigquery/opentelemetry_tracing.py
@@ -153,4 +153,12 @@
if job_ref.num_child_jobs is not None:
job_attributes["num_child_jobs"] = job_ref.num_child_jobs
+ total_bytes_billed = getattr(job_ref, "total_bytes_billed", None)
+ if total_bytes_billed is not None:
+ job_attributes["total_bytes_billed"] = total_bytes_billed
+
+ total_bytes_processed = getattr(job_ref, "total_bytes_processed", None)
+ if total_bytes_processed is not None:
+ job_attributes["total_bytes_processed"] = total_bytes_processed
+
return job_attributes
| {"golden_diff": "diff --git a/google/cloud/bigquery/opentelemetry_tracing.py b/google/cloud/bigquery/opentelemetry_tracing.py\n--- a/google/cloud/bigquery/opentelemetry_tracing.py\n+++ b/google/cloud/bigquery/opentelemetry_tracing.py\n@@ -153,4 +153,12 @@\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n \n+ total_bytes_billed = getattr(job_ref, \"total_bytes_billed\", None)\n+ if total_bytes_billed is not None:\n+ job_attributes[\"total_bytes_billed\"] = total_bytes_billed\n+\n+ total_bytes_processed = getattr(job_ref, \"total_bytes_processed\", None)\n+ if total_bytes_processed is not None:\n+ job_attributes[\"total_bytes_processed\"] = total_bytes_processed\n+\n return job_attributes\n", "issue": "Extend the OTEL traces to track bytes billed and bytes processed\nExtend OTEL traces to track bytes billed and bytes processed. Allowing developers to see how much data and cost was consumed as part of BigQueryJob trace \n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError # type: ignore\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace # type: ignore\n from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore\n from opentelemetry.trace.status import Status # type: ignore\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. 
\"\n \"OpenTelemetry or one of its components could not be imported; \"\n \"please add compatible versions of opentelemetry-api and \"\n \"opentelemetry-instrumentation packages in order to get BigQuery \"\n \"Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_status_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n \"\"\"Compiles attributes from: client, job_ref, user-provided attributes.\n\n Attributes from all of these sources are merged together. Note the\n attributes are added sequentially based on perceived order of precedence:\n i.e. attributes added last may overwrite attributes added earlier.\n\n Args:\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the final_attributes\n\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the final_attributes.\n\n Returns: dict\n \"\"\"\n\n collected_attributes = _default_attributes.copy()\n\n if client:\n collected_attributes.update(_set_client_attributes(client))\n if job_ref:\n collected_attributes.update(_set_job_attributes(job_ref))\n if attributes:\n collected_attributes.update(attributes)\n\n final_attributes = {k: v for k, v in collected_attributes.items() if v is not None}\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"job_id\": job_ref.job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n if job_ref.location is not None:\n job_attributes[\"location\"] = job_ref.location\n\n if job_ref.parent_job_id is not None:\n job_attributes[\"parent_job_id\"] = job_ref.parent_job_id\n\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nfrom contextlib import 
contextmanager\nfrom google.api_core.exceptions import GoogleAPICallError # type: ignore\n\nlogger = logging.getLogger(__name__)\ntry:\n from opentelemetry import trace # type: ignore\n from opentelemetry.instrumentation.utils import http_status_to_status_code # type: ignore\n from opentelemetry.trace.status import Status # type: ignore\n\n HAS_OPENTELEMETRY = True\n _warned_telemetry = True\n\nexcept ImportError:\n HAS_OPENTELEMETRY = False\n _warned_telemetry = False\n\n_default_attributes = {\n \"db.system\": \"BigQuery\"\n} # static, default values assigned to all spans\n\n\n@contextmanager\ndef create_span(name, attributes=None, client=None, job_ref=None):\n \"\"\"Creates a ContextManager for a Span to be exported to the configured exporter.\n If no configuration exists yields None.\n\n Args:\n name (str): Name that will be set for the span being created\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. not a default attribute)\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the created spans.\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the created spans.\n\n Yields:\n opentelemetry.trace.Span: Yields the newly created Span.\n\n Raises:\n google.api_core.exceptions.GoogleAPICallError:\n Raised if a span could not be yielded or issue with call to\n OpenTelemetry.\n \"\"\"\n global _warned_telemetry\n final_attributes = _get_final_span_attributes(attributes, client, job_ref)\n if not HAS_OPENTELEMETRY:\n if not _warned_telemetry:\n logger.debug(\n \"This service is instrumented using OpenTelemetry. \"\n \"OpenTelemetry or one of its components could not be imported; \"\n \"please add compatible versions of opentelemetry-api and \"\n \"opentelemetry-instrumentation packages in order to get BigQuery \"\n \"Tracing data.\"\n )\n _warned_telemetry = True\n\n yield None\n return\n tracer = trace.get_tracer(__name__)\n\n # yield new span value\n with tracer.start_as_current_span(name=name, attributes=final_attributes) as span:\n try:\n yield span\n except GoogleAPICallError as error:\n if error.code is not None:\n span.set_status(Status(http_status_to_status_code(error.code)))\n raise\n\n\ndef _get_final_span_attributes(attributes=None, client=None, job_ref=None):\n \"\"\"Compiles attributes from: client, job_ref, user-provided attributes.\n\n Attributes from all of these sources are merged together. Note the\n attributes are added sequentially based on perceived order of precedence:\n i.e. attributes added last may overwrite attributes added earlier.\n\n Args:\n attributes (Optional[dict]):\n Additional attributes that pertain to\n the specific API call (i.e. 
not a default attribute)\n\n client (Optional[google.cloud.bigquery.client.Client]):\n Pass in a Client object to extract any attributes that may be\n relevant to it and add them to the final_attributes\n\n job_ref (Optional[google.cloud.bigquery.job._AsyncJob])\n Pass in a _AsyncJob object to extract any attributes that may be\n relevant to it and add them to the final_attributes.\n\n Returns: dict\n \"\"\"\n\n collected_attributes = _default_attributes.copy()\n\n if client:\n collected_attributes.update(_set_client_attributes(client))\n if job_ref:\n collected_attributes.update(_set_job_attributes(job_ref))\n if attributes:\n collected_attributes.update(attributes)\n\n final_attributes = {k: v for k, v in collected_attributes.items() if v is not None}\n return final_attributes\n\n\ndef _set_client_attributes(client):\n return {\"db.name\": client.project, \"location\": client.location}\n\n\ndef _set_job_attributes(job_ref):\n job_attributes = {\n \"db.name\": job_ref.project,\n \"job_id\": job_ref.job_id,\n \"state\": job_ref.state,\n }\n\n job_attributes[\"hasErrors\"] = job_ref.error_result is not None\n\n if job_ref.created is not None:\n job_attributes[\"timeCreated\"] = job_ref.created.isoformat()\n\n if job_ref.started is not None:\n job_attributes[\"timeStarted\"] = job_ref.started.isoformat()\n\n if job_ref.ended is not None:\n job_attributes[\"timeEnded\"] = job_ref.ended.isoformat()\n\n if job_ref.location is not None:\n job_attributes[\"location\"] = job_ref.location\n\n if job_ref.parent_job_id is not None:\n job_attributes[\"parent_job_id\"] = job_ref.parent_job_id\n\n if job_ref.num_child_jobs is not None:\n job_attributes[\"num_child_jobs\"] = job_ref.num_child_jobs\n\n total_bytes_billed = getattr(job_ref, \"total_bytes_billed\", None)\n if total_bytes_billed is not None:\n job_attributes[\"total_bytes_billed\"] = total_bytes_billed\n\n total_bytes_processed = getattr(job_ref, \"total_bytes_processed\", None)\n if total_bytes_processed is not None:\n job_attributes[\"total_bytes_processed\"] = total_bytes_processed\n\n return job_attributes\n", "path": "google/cloud/bigquery/opentelemetry_tracing.py"}]} | 1,921 | 187 |
gh_patches_debug_2307 | rasdani/github-patches | git_diff | huggingface__text-generation-inference-1089 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Flash attention (v1) is broken
Defaulting window_size_left to "-1" breaks flash attention v1 since v1 only works if window_size_left is 0 (line 83).
https://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/utils/flash_attn.py#L60C7-L60C7
--- END ISSUE ---
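Concretely, the default of -1 (meaning "no sliding window") should be accepted by the v1 path, and only a finite window should raise. A self-contained sketch of the intended guard (illustrative, mirroring the check in the file below):
```python
def check_window_size(window_size_left, has_flash_attn_v2):
    """Sketch of the intended guard: -1 (no sliding window) must pass on the
    v1 path, since flash attention v1 has no sliding-window support at all."""
    if has_flash_attn_v2:
        return  # v2 handles any window_size_left
    if window_size_left != -1:  # the buggy version compares against 0 instead
        raise NotImplementedError(
            "window_size_left is only available with flash attn v2"
        )

check_window_size(-1, has_flash_attn_v2=False)  # default value: must not raise
```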
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `server/text_generation_server/utils/flash_attn.py`
Content:
```
1 import os
2 import torch
3
4 from loguru import logger
5
6 if os.getenv("USE_FLASH_ATTENTION", "").lower() == "false":
7 raise ImportError("`USE_FLASH_ATTENTION` is false.")
8
9 if not torch.cuda.is_available():
10 raise ImportError("CUDA is not available")
11
12 major, minor = torch.cuda.get_device_capability()
13 is_sm75 = major == 7 and minor == 5
14 is_sm8x = major == 8 and minor >= 0
15 is_sm90 = major == 9 and minor == 0
16
17 HAS_FLASH_ATTN = False
18 HAS_FLASH_ATTN_V2 = False
19 try:
20 try:
21 import flash_attn_2_cuda
22 except ImportError:
23 raise ImportError(
24 "Flash Attention V2 is not installed.\n"
25 "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
26 "or install flash attention v2 with `cd server && make install install-flash-attention-v2`"
27 )
28 if not (is_sm8x or is_sm90):
29 raise ImportError(
30 f"GPU with CUDA capability {major} {minor} is not supported for "
31 "Flash Attention V2"
32 )
33 HAS_FLASH_ATTN_V2 = True
34 except ImportError as e:
35 try:
36 import flash_attn_cuda
37 except ImportError:
38 raise ImportError(
39 "Flash Attention is not installed.\n"
40 "Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) "
41 "or install flash attention with `cd server && make install install-flash-attention`"
42 ) from e
43
44 if not (is_sm75 or is_sm8x or is_sm90):
45 raise ImportError(
46 f"GPU with CUDA capability {major} {minor} is not supported"
47 ) from e
48 logger.warning(f"Unable to use Flash Attention V2: {e}")
49 HAS_FLASH_ATTN = True
50
51
52 def attention(
53 q,
54 k,
55 v,
56 out,
57 cu_seqlens,
58 max_s,
59 softmax_scale,
60 window_size_left=-1,
61 ):
62 if HAS_FLASH_ATTN_V2:
63 return flash_attn_2_cuda.varlen_fwd(
64 q,
65 k,
66 v,
67 out,
68 cu_seqlens,
69 cu_seqlens,
70 max_s,
71 max_s,
72 0.0,
73 softmax_scale,
74 False,
75 True,
76 window_size_left,
77 0,
78 False,
79 None,
80 )
81
82 if HAS_FLASH_ATTN:
83 if window_size_left != 0:
84 raise NotImplementedError(
85 "window_size_left is only available with flash attn v2"
86 )
87
88 # Flash attention v1 requires q, k and v to have the same number of heads
89 if k.shape[1] != q.shape[1]:
90 # MQA expand
91 if k.shape[1] == 1:
92 k = k.expand(-1, q.shape[1], -1)
93 # Grouped attention reshape
94 else:
95 original_shape = k.shape
96 k = (
97 k.unsqueeze(2)
98 .expand(-1, -1, q.shape[1] // k.shape[1], -1)
99 .reshape(original_shape[0], -1, original_shape[2])
100 )
101 if v.shape[1] != q.shape[1]:
102 # MQA expand
103 if v.shape[1] == 1:
104 v = v.expand(-1, q.shape[1], -1)
105 # Grouped attention reshape
106 else:
107 original_shape = v.shape
108 v = (
109 v.unsqueeze(2)
110 .expand(-1, -1, q.shape[1] // v.shape[1], -1)
111 .reshape(original_shape[0], -1, original_shape[2])
112 )
113
114 return flash_attn_cuda.fwd(
115 q,
116 k,
117 v,
118 out,
119 cu_seqlens,
120 cu_seqlens,
121 max_s,
122 max_s,
123 0.0,
124 softmax_scale,
125 False,
126 True,
127 False,
128 0,
129 None,
130 )
131
132 raise NotImplementedError("flash attention is not installed")
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py
--- a/server/text_generation_server/utils/flash_attn.py
+++ b/server/text_generation_server/utils/flash_attn.py
@@ -80,7 +80,7 @@
)
if HAS_FLASH_ATTN:
- if window_size_left != 0:
+ if window_size_left != -1:
raise NotImplementedError(
"window_size_left is only available with flash attn v2"
)
| {"golden_diff": "diff --git a/server/text_generation_server/utils/flash_attn.py b/server/text_generation_server/utils/flash_attn.py\n--- a/server/text_generation_server/utils/flash_attn.py\n+++ b/server/text_generation_server/utils/flash_attn.py\n@@ -80,7 +80,7 @@\n )\n \n if HAS_FLASH_ATTN:\n- if window_size_left != 0:\n+ if window_size_left != -1:\n raise NotImplementedError(\n \"window_size_left is only available with flash attn v2\"\n )\n", "issue": "Flash attention (v1) is broken\nDefaulting window_size_left to \"-1\" breaks flash attention v1 since v1 only works if window_size_left is 0 (line 83).\r\n\r\nhttps://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/utils/flash_attn.py#L60C7-L60C7\n", "before_files": [{"content": "import os\nimport torch\n\nfrom loguru import logger\n\nif os.getenv(\"USE_FLASH_ATTENTION\", \"\").lower() == \"false\":\n raise ImportError(\"`USE_FLASH_ATTENTION` is false.\")\n\nif not torch.cuda.is_available():\n raise ImportError(\"CUDA is not available\")\n\nmajor, minor = torch.cuda.get_device_capability()\nis_sm75 = major == 7 and minor == 5\nis_sm8x = major == 8 and minor >= 0\nis_sm90 = major == 9 and minor == 0\n\nHAS_FLASH_ATTN = False\nHAS_FLASH_ATTN_V2 = False\ntry:\n try:\n import flash_attn_2_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention V2 is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention v2 with `cd server && make install install-flash-attention-v2`\"\n )\n if not (is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported for \"\n \"Flash Attention V2\"\n )\n HAS_FLASH_ATTN_V2 = True\nexcept ImportError as e:\n try:\n import flash_attn_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention with `cd server && make install install-flash-attention`\"\n ) from e\n\n if not (is_sm75 or is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported\"\n ) from e\n logger.warning(f\"Unable to use Flash Attention V2: {e}\")\n HAS_FLASH_ATTN = True\n\n\ndef attention(\n q,\n k,\n v,\n out,\n cu_seqlens,\n max_s,\n softmax_scale,\n window_size_left=-1,\n):\n if HAS_FLASH_ATTN_V2:\n return flash_attn_2_cuda.varlen_fwd(\n q,\n k,\n v,\n out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n window_size_left,\n 0,\n False,\n None,\n )\n\n if HAS_FLASH_ATTN:\n if window_size_left != 0:\n raise NotImplementedError(\n \"window_size_left is only available with flash attn v2\"\n )\n\n # Flash attention v1 requires q, k and v to have the same number of heads\n if k.shape[1] != q.shape[1]:\n # MQA expand\n if k.shape[1] == 1:\n k = k.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = k.shape\n k = (\n k.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // k.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n if v.shape[1] != q.shape[1]:\n # MQA expand\n if v.shape[1] == 1:\n v = v.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = v.shape\n v = (\n v.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // v.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n\n return flash_attn_cuda.fwd(\n q,\n k,\n v,\n 
out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n False,\n 0,\n None,\n )\n\n raise NotImplementedError(\"flash attention is not installed\")\n", "path": "server/text_generation_server/utils/flash_attn.py"}], "after_files": [{"content": "import os\nimport torch\n\nfrom loguru import logger\n\nif os.getenv(\"USE_FLASH_ATTENTION\", \"\").lower() == \"false\":\n raise ImportError(\"`USE_FLASH_ATTENTION` is false.\")\n\nif not torch.cuda.is_available():\n raise ImportError(\"CUDA is not available\")\n\nmajor, minor = torch.cuda.get_device_capability()\nis_sm75 = major == 7 and minor == 5\nis_sm8x = major == 8 and minor >= 0\nis_sm90 = major == 9 and minor == 0\n\nHAS_FLASH_ATTN = False\nHAS_FLASH_ATTN_V2 = False\ntry:\n try:\n import flash_attn_2_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention V2 is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention v2 with `cd server && make install install-flash-attention-v2`\"\n )\n if not (is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported for \"\n \"Flash Attention V2\"\n )\n HAS_FLASH_ATTN_V2 = True\nexcept ImportError as e:\n try:\n import flash_attn_cuda\n except ImportError:\n raise ImportError(\n \"Flash Attention is not installed.\\n\"\n \"Use the official Docker image (ghcr.io/huggingface/text-generation-inference:latest) \"\n \"or install flash attention with `cd server && make install install-flash-attention`\"\n ) from e\n\n if not (is_sm75 or is_sm8x or is_sm90):\n raise ImportError(\n f\"GPU with CUDA capability {major} {minor} is not supported\"\n ) from e\n logger.warning(f\"Unable to use Flash Attention V2: {e}\")\n HAS_FLASH_ATTN = True\n\n\ndef attention(\n q,\n k,\n v,\n out,\n cu_seqlens,\n max_s,\n softmax_scale,\n window_size_left=-1,\n):\n if HAS_FLASH_ATTN_V2:\n return flash_attn_2_cuda.varlen_fwd(\n q,\n k,\n v,\n out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n window_size_left,\n 0,\n False,\n None,\n )\n\n if HAS_FLASH_ATTN:\n if window_size_left != -1:\n raise NotImplementedError(\n \"window_size_left is only available with flash attn v2\"\n )\n\n # Flash attention v1 requires q, k and v to have the same number of heads\n if k.shape[1] != q.shape[1]:\n # MQA expand\n if k.shape[1] == 1:\n k = k.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = k.shape\n k = (\n k.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // k.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n if v.shape[1] != q.shape[1]:\n # MQA expand\n if v.shape[1] == 1:\n v = v.expand(-1, q.shape[1], -1)\n # Grouped attention reshape\n else:\n original_shape = v.shape\n v = (\n v.unsqueeze(2)\n .expand(-1, -1, q.shape[1] // v.shape[1], -1)\n .reshape(original_shape[0], -1, original_shape[2])\n )\n\n return flash_attn_cuda.fwd(\n q,\n k,\n v,\n out,\n cu_seqlens,\n cu_seqlens,\n max_s,\n max_s,\n 0.0,\n softmax_scale,\n False,\n True,\n False,\n 0,\n None,\n )\n\n raise NotImplementedError(\"flash attention is not installed\")\n", "path": "server/text_generation_server/utils/flash_attn.py"}]} | 1,577 | 114 |
gh_patches_debug_15818 | rasdani/github-patches | git_diff | intel__dffml-177 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
model: scikit: setup.py needs correct entrypoints
```diff
diff --git a/model/scikit/setup.py b/model/scikit/setup.py
index 1bd6284..efbff80 100644
--- a/model/scikit/setup.py
+++ b/model/scikit/setup.py
@@ -57,5 +57,9 @@ setup(
],
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
- entry_points={"dffml.model": [f"scikitlr = {IMPORT_NAME}.sciLR:LR"]},
+ entry_points={"dffml.model": [
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel",
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel",
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel",
+ ]},
)
```
--- END ISSUE ---
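Note that the diff proposed above registers the same `scikitmlp` name three times; setuptools entry points within one group need distinct names, one per model class. A sketch of the intended shape (the class names and the package constant are assumptions for illustration):
```python
# Hypothetical excerpt: each model gets its own unique entry-point key.
IMPORT_NAME = "dffml_model_scikit"
entry_points = {
    "dffml.model": [
        f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifier",
        f"scikitknn = {IMPORT_NAME}.scikit_models:KNeighborsClassifier",
        f"scikitsvc = {IMPORT_NAME}.scikit_models:SVC",
    ]
}
# setup(..., entry_points=entry_points) then registers one loadable model per name.
```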
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `model/scikit/setup.py`
Content:
```
1 import os
2 import ast
3 from io import open
4 from setuptools import find_packages, setup
5
6 ORG = "intel"
7 NAME = "dffml-model-scikit"
8 DESCRIPTION = "DFFML model scikit"
9 AUTHOR_NAME = "Yash Lamba"
10 AUTHOR_EMAIL = "[email protected]"
11 INSTALL_REQUIRES = ["scikit-learn>=0.21.2", "joblib>=0.13.2", "pandas>=0.25.0"]
12
13 IMPORT_NAME = (
14 NAME
15 if "replace_package_name".upper() != NAME
16 else "replace_import_package_name".upper()
17 ).replace("-", "_")
18
19 SELF_PATH = os.path.dirname(os.path.realpath(__file__))
20
21 with open(os.path.join(SELF_PATH, IMPORT_NAME, "version.py"), "r") as f:
22 for line in f:
23 if line.startswith("VERSION"):
24 version = ast.literal_eval(line.strip().split("=")[-1].strip())
25 break
26
27 with open(os.path.join(SELF_PATH, "README.md"), "r", encoding="utf-8") as f:
28 readme = f.read()
29
30 setup(
31 name="dffml-model-scikit",
32 version=version,
33 description="",
34 long_description=readme,
35 long_description_content_type="text/markdown",
36 author="Yash Lamba",
37 author_email="[email protected]",
38 maintainer="John Andersen",
39 maintainer_email="[email protected]",
40 url="https://github.com/intel/dffml/blob/master/model/scikit/README.md",
41 license="MIT",
42 keywords=["dffml"],
43 classifiers=[
44 "Development Status :: 3 - Alpha",
45 "Intended Audience :: Developers",
46 "License :: OSI Approved :: MIT License",
47 "Natural Language :: English",
48 "Operating System :: OS Independent",
49 "Programming Language :: Python :: 3 :: Only",
50 "Programming Language :: Python :: 3.7",
51 "Programming Language :: Python :: Implementation :: CPython",
52 "Programming Language :: Python :: Implementation :: PyPy",
53 ],
54 install_requires=INSTALL_REQUIRES,
55 packages=find_packages(),
56 entry_points={"dffml.model": [f"scikitlr = {IMPORT_NAME}.sciLR:LR"]},
57 )
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/model/scikit/setup.py b/model/scikit/setup.py
--- a/model/scikit/setup.py
+++ b/model/scikit/setup.py
@@ -53,5 +53,18 @@
],
install_requires=INSTALL_REQUIRES,
packages=find_packages(),
- entry_points={"dffml.model": [f"scikitlr = {IMPORT_NAME}.sciLR:LR"]},
+ entry_points={
+ "dffml.model": [
+ f"scikitknn = {IMPORT_NAME}.scikit_models:KNeighborsClassifier",
+ f"scikitadaboost = {IMPORT_NAME}.scikit_models:AdaBoostClassifier",
+ f"scikitgpc = {IMPORT_NAME}.scikit_models:GaussianProcessClassifier",
+ f"scikitdtc = {IMPORT_NAME}.scikit_models:DecisionTreeClassifier",
+ f"scikitrfc = {IMPORT_NAME}.scikit_models:RandomForestClassifier",
+ f"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifier",
+ f"scikitgnb = {IMPORT_NAME}.scikit_models:GaussianNB",
+ f"scikitqda = {IMPORT_NAME}.scikit_models:QuadraticDiscriminantAnalysis",
+ f"scikitsvc = {IMPORT_NAME}.scikit_models:SVC",
+ f"scikitlr = {IMPORT_NAME}.scikit_models:LinearRegression",
+ ]
+ },
)
| {"golden_diff": "diff --git a/model/scikit/setup.py b/model/scikit/setup.py\n--- a/model/scikit/setup.py\n+++ b/model/scikit/setup.py\n@@ -53,5 +53,18 @@\n ],\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n- entry_points={\"dffml.model\": [f\"scikitlr = {IMPORT_NAME}.sciLR:LR\"]},\n+ entry_points={\n+ \"dffml.model\": [\n+ f\"scikitknn = {IMPORT_NAME}.scikit_models:KNeighborsClassifier\",\n+ f\"scikitadaboost = {IMPORT_NAME}.scikit_models:AdaBoostClassifier\",\n+ f\"scikitgpc = {IMPORT_NAME}.scikit_models:GaussianProcessClassifier\",\n+ f\"scikitdtc = {IMPORT_NAME}.scikit_models:DecisionTreeClassifier\",\n+ f\"scikitrfc = {IMPORT_NAME}.scikit_models:RandomForestClassifier\",\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifier\",\n+ f\"scikitgnb = {IMPORT_NAME}.scikit_models:GaussianNB\",\n+ f\"scikitqda = {IMPORT_NAME}.scikit_models:QuadraticDiscriminantAnalysis\",\n+ f\"scikitsvc = {IMPORT_NAME}.scikit_models:SVC\",\n+ f\"scikitlr = {IMPORT_NAME}.scikit_models:LinearRegression\",\n+ ]\n+ },\n )\n", "issue": "model: scikit: setup.py needs correct entrypoints\n```diff\r\ndiff --git a/model/scikit/setup.py b/model/scikit/setup.py\r\nindex 1bd6284..efbff80 100644\r\n--- a/model/scikit/setup.py\r\n+++ b/model/scikit/setup.py\r\n@@ -57,5 +57,9 @@ setup(\r\n ],\r\n install_requires=INSTALL_REQUIRES,\r\n packages=find_packages(),\r\n- entry_points={\"dffml.model\": [f\"scikitlr = {IMPORT_NAME}.sciLR:LR\"]},\r\n+ entry_points={\"dffml.model\": [\r\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel\",\r\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel\",\r\n+ f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifierModel\",\r\n+ ]},\r\n )\r\n```\n", "before_files": [{"content": "import os\nimport ast\nfrom io import open\nfrom setuptools import find_packages, setup\n\nORG = \"intel\"\nNAME = \"dffml-model-scikit\"\nDESCRIPTION = \"DFFML model scikit\"\nAUTHOR_NAME = \"Yash Lamba\"\nAUTHOR_EMAIL = \"[email protected]\"\nINSTALL_REQUIRES = [\"scikit-learn>=0.21.2\", \"joblib>=0.13.2\", \"pandas>=0.25.0\"]\n\nIMPORT_NAME = (\n NAME\n if \"replace_package_name\".upper() != NAME\n else \"replace_import_package_name\".upper()\n).replace(\"-\", \"_\")\n\nSELF_PATH = os.path.dirname(os.path.realpath(__file__))\n\nwith open(os.path.join(SELF_PATH, IMPORT_NAME, \"version.py\"), \"r\") as f:\n for line in f:\n if line.startswith(\"VERSION\"):\n version = ast.literal_eval(line.strip().split(\"=\")[-1].strip())\n break\n\nwith open(os.path.join(SELF_PATH, \"README.md\"), \"r\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"dffml-model-scikit\",\n version=version,\n description=\"\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Yash Lamba\",\n author_email=\"[email protected]\",\n maintainer=\"John Andersen\",\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/intel/dffml/blob/master/model/scikit/README.md\",\n license=\"MIT\",\n keywords=[\"dffml\"],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n 
entry_points={\"dffml.model\": [f\"scikitlr = {IMPORT_NAME}.sciLR:LR\"]},\n)\n", "path": "model/scikit/setup.py"}], "after_files": [{"content": "import os\nimport ast\nfrom io import open\nfrom setuptools import find_packages, setup\n\nORG = \"intel\"\nNAME = \"dffml-model-scikit\"\nDESCRIPTION = \"DFFML model scikit\"\nAUTHOR_NAME = \"Yash Lamba\"\nAUTHOR_EMAIL = \"[email protected]\"\nINSTALL_REQUIRES = [\"scikit-learn>=0.21.2\", \"joblib>=0.13.2\", \"pandas>=0.25.0\"]\n\nIMPORT_NAME = (\n NAME\n if \"replace_package_name\".upper() != NAME\n else \"replace_import_package_name\".upper()\n).replace(\"-\", \"_\")\n\nSELF_PATH = os.path.dirname(os.path.realpath(__file__))\n\nwith open(os.path.join(SELF_PATH, IMPORT_NAME, \"version.py\"), \"r\") as f:\n for line in f:\n if line.startswith(\"VERSION\"):\n version = ast.literal_eval(line.strip().split(\"=\")[-1].strip())\n break\n\nwith open(os.path.join(SELF_PATH, \"README.md\"), \"r\", encoding=\"utf-8\") as f:\n readme = f.read()\n\nsetup(\n name=\"dffml-model-scikit\",\n version=version,\n description=\"\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n author=\"Yash Lamba\",\n author_email=\"[email protected]\",\n maintainer=\"John Andersen\",\n maintainer_email=\"[email protected]\",\n url=\"https://github.com/intel/dffml/blob/master/model/scikit/README.md\",\n license=\"MIT\",\n keywords=[\"dffml\"],\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n install_requires=INSTALL_REQUIRES,\n packages=find_packages(),\n entry_points={\n \"dffml.model\": [\n f\"scikitknn = {IMPORT_NAME}.scikit_models:KNeighborsClassifier\",\n f\"scikitadaboost = {IMPORT_NAME}.scikit_models:AdaBoostClassifier\",\n f\"scikitgpc = {IMPORT_NAME}.scikit_models:GaussianProcessClassifier\",\n f\"scikitdtc = {IMPORT_NAME}.scikit_models:DecisionTreeClassifier\",\n f\"scikitrfc = {IMPORT_NAME}.scikit_models:RandomForestClassifier\",\n f\"scikitmlp = {IMPORT_NAME}.scikit_models:MLPClassifier\",\n f\"scikitgnb = {IMPORT_NAME}.scikit_models:GaussianNB\",\n f\"scikitqda = {IMPORT_NAME}.scikit_models:QuadraticDiscriminantAnalysis\",\n f\"scikitsvc = {IMPORT_NAME}.scikit_models:SVC\",\n f\"scikitlr = {IMPORT_NAME}.scikit_models:LinearRegression\",\n ]\n },\n)\n", "path": "model/scikit/setup.py"}]} | 1,062 | 317 |
gh_patches_debug_18895 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1186 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pyhf json2xml requires pyhf[contrib]
# Description
```
$ pip install pyhf[xmlio]
$ pyhf json2xml -h
ERROR:pyhf.contrib.utils:No module named 'requests'
Installation of the contrib extra is required to use pyhf.contrib.utils.download
Please install with: python -m pip install pyhf[contrib]
Usage: pyhf json2xml [OPTIONS] [WORKSPACE]
Convert pyhf JSON back to XML + ROOT files.
Options:
--output-dir PATH
--specroot TEXT
--dataroot TEXT
--resultprefix TEXT
-p, --patch TEXT
-h, --help Show this message and exit.
```
# Expected Behavior
Shown the help without any reference to pyhf.contrib as this does not depend on contrib.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/contrib/cli.py`
Content:
```
1 """CLI for functionality that will get migrated out eventually."""
2 import logging
3 import click
4 from pathlib import Path
5
6 from . import utils
7
8 logging.basicConfig()
9 log = logging.getLogger(__name__)
10
11
12 @click.group(name="contrib")
13 def cli():
14 """
15 Contrib experimental operations.
16
17 .. note::
18
19 Requires installation of the ``contrib`` extra.
20
21 .. code-block:: shell
22
23 $ python -m pip install pyhf[contrib]
24 """
25
26
27 @cli.command()
28 @click.argument("archive-url")
29 @click.argument("output-directory")
30 @click.option("-v", "--verbose", is_flag=True, help="Enables verbose mode")
31 @click.option(
32 "-f", "--force", is_flag=True, help="Force download from non-approved host"
33 )
34 @click.option(
35 "-c",
36 "--compress",
37 is_flag=True,
38 help="Keep the archive in a compressed tar.gz form",
39 )
40 def download(archive_url, output_directory, verbose, force, compress):
41 """
42 Download the patchset archive from the remote URL and extract it in a
43 directory at the path given.
44
45 Example:
46
47 .. code-block:: shell
48
49 $ pyhf contrib download --verbose https://doi.org/10.17182/hepdata.90607.v3/r3 1Lbb-likelihoods
50
51 \b
52 1Lbb-likelihoods/patchset.json
53 1Lbb-likelihoods/README.md
54 1Lbb-likelihoods/BkgOnly.json
55
56 Raises:
57 :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
58 """
59 try:
60 utils.download(archive_url, output_directory, force, compress)
61
62 if verbose:
63 file_list = [str(file) for file in list(Path(output_directory).glob("*"))]
64 print("\n".join(file_list))
65 except AttributeError as excep:
66 exception_info = (
67 str(excep)
68 + "\nInstallation of the contrib extra is required to use the contrib CLI API"
69 + "\nPlease install with: python -m pip install pyhf[contrib]\n"
70 )
71 log.error(exception_info)
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyhf/contrib/cli.py b/src/pyhf/contrib/cli.py
--- a/src/pyhf/contrib/cli.py
+++ b/src/pyhf/contrib/cli.py
@@ -3,8 +3,6 @@
import click
from pathlib import Path
-from . import utils
-
logging.basicConfig()
log = logging.getLogger(__name__)
@@ -22,6 +20,10 @@
$ python -m pip install pyhf[contrib]
"""
+ from . import utils # Guard CLI from missing extra
+
+ # TODO: https://github.com/scikit-hep/pyhf/issues/863
+ _ = utils # Placate pyflakes
@cli.command()
@@ -57,6 +59,8 @@
:class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid
"""
try:
+ from . import utils
+
utils.download(archive_url, output_directory, force, compress)
if verbose:
| {"golden_diff": "diff --git a/src/pyhf/contrib/cli.py b/src/pyhf/contrib/cli.py\n--- a/src/pyhf/contrib/cli.py\n+++ b/src/pyhf/contrib/cli.py\n@@ -3,8 +3,6 @@\n import click\n from pathlib import Path\n \n-from . import utils\n-\n logging.basicConfig()\n log = logging.getLogger(__name__)\n \n@@ -22,6 +20,10 @@\n \n $ python -m pip install pyhf[contrib]\n \"\"\"\n+ from . import utils # Guard CLI from missing extra\n+\n+ # TODO: https://github.com/scikit-hep/pyhf/issues/863\n+ _ = utils # Placate pyflakes\n \n \n @cli.command()\n@@ -57,6 +59,8 @@\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n+ from . import utils\n+\n utils.download(archive_url, output_directory, force, compress)\n \n if verbose:\n", "issue": "pyhf json2xml requires pyhf[contrib]\n# Description\r\n\r\n```\r\n$ pip install pyhf[xmlio]\r\n$ pyhf json2xml -h\r\nERROR:pyhf.contrib.utils:No module named 'requests'\r\nInstallation of the contrib extra is required to use pyhf.contrib.utils.download\r\nPlease install with: python -m pip install pyhf[contrib]\r\n\r\nUsage: pyhf json2xml [OPTIONS] [WORKSPACE]\r\n\r\n Convert pyhf JSON back to XML + ROOT files.\r\n\r\nOptions:\r\n --output-dir PATH\r\n --specroot TEXT\r\n --dataroot TEXT\r\n --resultprefix TEXT\r\n -p, --patch TEXT\r\n -h, --help Show this message and exit.\r\n```\r\n\r\n# Expected Behavior\r\n\r\nShown the help without any reference to pyhf.contrib as this does not depend on contrib.\n", "before_files": [{"content": "\"\"\"CLI for functionality that will get migrated out eventually.\"\"\"\nimport logging\nimport click\nfrom pathlib import Path\n\nfrom . import utils\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n\[email protected](name=\"contrib\")\ndef cli():\n \"\"\"\n Contrib experimental operations.\n\n .. note::\n\n Requires installation of the ``contrib`` extra.\n\n .. code-block:: shell\n\n $ python -m pip install pyhf[contrib]\n \"\"\"\n\n\[email protected]()\[email protected](\"archive-url\")\[email protected](\"output-directory\")\[email protected](\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\[email protected](\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n)\[email protected](\n \"-c\",\n \"--compress\",\n is_flag=True,\n help=\"Keep the archive in a compressed tar.gz form\",\n)\ndef download(archive_url, output_directory, verbose, force, compress):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n .. 
code-block:: shell\n\n $ pyhf contrib download --verbose https://doi.org/10.17182/hepdata.90607.v3/r3 1Lbb-likelihoods\n\n \\b\n 1Lbb-likelihoods/patchset.json\n 1Lbb-likelihoods/README.md\n 1Lbb-likelihoods/BkgOnly.json\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n utils.download(archive_url, output_directory, force, compress)\n\n if verbose:\n file_list = [str(file) for file in list(Path(output_directory).glob(\"*\"))]\n print(\"\\n\".join(file_list))\n except AttributeError as excep:\n exception_info = (\n str(excep)\n + \"\\nInstallation of the contrib extra is required to use the contrib CLI API\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\"\n )\n log.error(exception_info)\n", "path": "src/pyhf/contrib/cli.py"}], "after_files": [{"content": "\"\"\"CLI for functionality that will get migrated out eventually.\"\"\"\nimport logging\nimport click\nfrom pathlib import Path\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n\[email protected](name=\"contrib\")\ndef cli():\n \"\"\"\n Contrib experimental operations.\n\n .. note::\n\n Requires installation of the ``contrib`` extra.\n\n .. code-block:: shell\n\n $ python -m pip install pyhf[contrib]\n \"\"\"\n from . import utils # Guard CLI from missing extra\n\n # TODO: https://github.com/scikit-hep/pyhf/issues/863\n _ = utils # Placate pyflakes\n\n\[email protected]()\[email protected](\"archive-url\")\[email protected](\"output-directory\")\[email protected](\"-v\", \"--verbose\", is_flag=True, help=\"Enables verbose mode\")\[email protected](\n \"-f\", \"--force\", is_flag=True, help=\"Force download from non-approved host\"\n)\[email protected](\n \"-c\",\n \"--compress\",\n is_flag=True,\n help=\"Keep the archive in a compressed tar.gz form\",\n)\ndef download(archive_url, output_directory, verbose, force, compress):\n \"\"\"\n Download the patchset archive from the remote URL and extract it in a\n directory at the path given.\n\n Example:\n\n .. code-block:: shell\n\n $ pyhf contrib download --verbose https://doi.org/10.17182/hepdata.90607.v3/r3 1Lbb-likelihoods\n\n \\b\n 1Lbb-likelihoods/patchset.json\n 1Lbb-likelihoods/README.md\n 1Lbb-likelihoods/BkgOnly.json\n\n Raises:\n :class:`~pyhf.exceptions.InvalidArchiveHost`: if the provided archive host name is not known to be valid\n \"\"\"\n try:\n from . import utils\n\n utils.download(archive_url, output_directory, force, compress)\n\n if verbose:\n file_list = [str(file) for file in list(Path(output_directory).glob(\"*\"))]\n print(\"\\n\".join(file_list))\n except AttributeError as excep:\n exception_info = (\n str(excep)\n + \"\\nInstallation of the contrib extra is required to use the contrib CLI API\"\n + \"\\nPlease install with: python -m pip install pyhf[contrib]\\n\"\n )\n log.error(exception_info)\n", "path": "src/pyhf/contrib/cli.py"}]} | 1,044 | 226 |
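The fix above works by deferring `from . import utils` until a command body actually runs, so merely importing the CLI module (for example to render `--help`) no longer pulls in the optional `contrib` dependencies. The snippet below is a generic sketch of that deferred-import pattern, not pyhf code; `optional_heavy_dep`, `mypkg[extra]`, and `do_work` are placeholder names.

```python
def do_work(path):
    """Run the command body; the optional dependency is imported only here."""
    try:
        import optional_heavy_dep  # deferred: a plain --help never reaches this line
    except ImportError as exc:
        raise RuntimeError(
            "This command needs an optional extra; install it with "
            "'python -m pip install mypkg[extra]'"
        ) from exc
    return optional_heavy_dep.process(path)
```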
gh_patches_debug_25711 | rasdani/github-patches | git_diff | ipython__ipython-8506 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
added %matplotlib --list so backends are easily visible
linked to #8444 adding --list argument to show the available backends
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `IPython/core/magics/pylab.py`
Content:
```
1 """Implementation of magic functions for matplotlib/pylab support.
2 """
3 from __future__ import print_function
4 #-----------------------------------------------------------------------------
5 # Copyright (c) 2012 The IPython Development Team.
6 #
7 # Distributed under the terms of the Modified BSD License.
8 #
9 # The full license is in the file COPYING.txt, distributed with this software.
10 #-----------------------------------------------------------------------------
11
12 #-----------------------------------------------------------------------------
13 # Imports
14 #-----------------------------------------------------------------------------
15
16 # Our own packages
17 from traitlets.config.application import Application
18 from IPython.core import magic_arguments
19 from IPython.core.magic import Magics, magics_class, line_magic
20 from IPython.testing.skipdoctest import skip_doctest
21 from IPython.utils.warn import warn
22 from IPython.core.pylabtools import backends
23
24 #-----------------------------------------------------------------------------
25 # Magic implementation classes
26 #-----------------------------------------------------------------------------
27
28 magic_gui_arg = magic_arguments.argument(
29 'gui', nargs='?',
30 help="""Name of the matplotlib backend to use %s.
31 If given, the corresponding matplotlib backend is used,
32 otherwise it will be matplotlib's default
33 (which you can set in your matplotlib config file).
34 """ % str(tuple(sorted(backends.keys())))
35 )
36
37
38 @magics_class
39 class PylabMagics(Magics):
40 """Magics related to matplotlib's pylab support"""
41
42 @skip_doctest
43 @line_magic
44 @magic_arguments.magic_arguments()
45 @magic_gui_arg
46 def matplotlib(self, line=''):
47 """Set up matplotlib to work interactively.
48
49 This function lets you activate matplotlib interactive support
50 at any point during an IPython session. It does not import anything
51 into the interactive namespace.
52
53 If you are using the inline matplotlib backend in the IPython Notebook
54 you can set which figure formats are enabled using the following::
55
56 In [1]: from IPython.display import set_matplotlib_formats
57
58 In [2]: set_matplotlib_formats('pdf', 'svg')
59
60 The default for inline figures sets `bbox_inches` to 'tight'. This can
61 cause discrepancies between the displayed image and the identical
62 image created using `savefig`. This behavior can be disabled using the
63 `%config` magic::
64
65 In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}
66
67 In addition, see the docstring of
68 `IPython.display.set_matplotlib_formats` and
69 `IPython.display.set_matplotlib_close` for more information on
70 changing additional behaviors of the inline backend.
71
72 Examples
73 --------
74 To enable the inline backend for usage with the IPython Notebook::
75
76 In [1]: %matplotlib inline
77
78 In this case, where the matplotlib default is TkAgg::
79
80 In [2]: %matplotlib
81 Using matplotlib backend: TkAgg
82
83 But you can explicitly request a different GUI backend::
84
85 In [3]: %matplotlib qt
86 """
87 args = magic_arguments.parse_argstring(self.matplotlib, line)
88 gui, backend = self.shell.enable_matplotlib(args.gui)
89 self._show_matplotlib_backend(args.gui, backend)
90
91 @skip_doctest
92 @line_magic
93 @magic_arguments.magic_arguments()
94 @magic_arguments.argument(
95 '--no-import-all', action='store_true', default=None,
96 help="""Prevent IPython from performing ``import *`` into the interactive namespace.
97
98 You can govern the default behavior of this flag with the
99 InteractiveShellApp.pylab_import_all configurable.
100 """
101 )
102 @magic_gui_arg
103 def pylab(self, line=''):
104 """Load numpy and matplotlib to work interactively.
105
106 This function lets you activate pylab (matplotlib, numpy and
107 interactive support) at any point during an IPython session.
108
109 %pylab makes the following imports::
110
111 import numpy
112 import matplotlib
113 from matplotlib import pylab, mlab, pyplot
114 np = numpy
115 plt = pyplot
116
117 from IPython.display import display
118 from IPython.core.pylabtools import figsize, getfigs
119
120 from pylab import *
121 from numpy import *
122
123 If you pass `--no-import-all`, the last two `*` imports will be excluded.
124
125 See the %matplotlib magic for more details about activating matplotlib
126 without affecting the interactive namespace.
127 """
128 args = magic_arguments.parse_argstring(self.pylab, line)
129 if args.no_import_all is None:
130 # get default from Application
131 if Application.initialized():
132 app = Application.instance()
133 try:
134 import_all = app.pylab_import_all
135 except AttributeError:
136 import_all = True
137 else:
138 # nothing specified, no app - default True
139 import_all = True
140 else:
141 # invert no-import flag
142 import_all = not args.no_import_all
143
144 gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
145 self._show_matplotlib_backend(args.gui, backend)
146 print ("Populating the interactive namespace from numpy and matplotlib")
147 if clobbered:
148 warn("pylab import has clobbered these variables: %s" % clobbered +
149 "\n`%matplotlib` prevents importing * from pylab and numpy"
150 )
151
152 def _show_matplotlib_backend(self, gui, backend):
153 """show matplotlib message backend message"""
154 if not gui or gui == 'auto':
155 print("Using matplotlib backend: %s" % backend)
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/IPython/core/magics/pylab.py b/IPython/core/magics/pylab.py
--- a/IPython/core/magics/pylab.py
+++ b/IPython/core/magics/pylab.py
@@ -42,6 +42,8 @@
@skip_doctest
@line_magic
@magic_arguments.magic_arguments()
+ @magic_arguments.argument('-l', '--list', action='store_true',
+ help='Show available matplotlib backends')
@magic_gui_arg
def matplotlib(self, line=''):
"""Set up matplotlib to work interactively.
@@ -83,10 +85,20 @@
But you can explicitly request a different GUI backend::
In [3]: %matplotlib qt
+
+ You can list the available backends using the -l/--list option
+
+ In [4]: %matplotlib --list
+ Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',
+ 'gtk', 'tk', 'inline']
"""
args = magic_arguments.parse_argstring(self.matplotlib, line)
- gui, backend = self.shell.enable_matplotlib(args.gui)
- self._show_matplotlib_backend(args.gui, backend)
+ if args.list:
+ backends_list = list(backends.keys())
+ print("Available matplotlib backends: %s" % backends_list)
+ else:
+ gui, backend = self.shell.enable_matplotlib(args.gui)
+ self._show_matplotlib_backend(args.gui, backend)
@skip_doctest
@line_magic
| {"golden_diff": "diff --git a/IPython/core/magics/pylab.py b/IPython/core/magics/pylab.py\n--- a/IPython/core/magics/pylab.py\n+++ b/IPython/core/magics/pylab.py\n@@ -42,6 +42,8 @@\n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n+ @magic_arguments.argument('-l', '--list', action='store_true',\n+ help='Show available matplotlib backends')\n @magic_gui_arg\n def matplotlib(self, line=''):\n \"\"\"Set up matplotlib to work interactively.\n@@ -83,10 +85,20 @@\n But you can explicitly request a different GUI backend::\n \n In [3]: %matplotlib qt\n+\n+ You can list the available backends using the -l/--list option\n+\n+ In [4]: %matplotlib --list\n+ Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',\n+ 'gtk', 'tk', 'inline']\n \"\"\"\n args = magic_arguments.parse_argstring(self.matplotlib, line)\n- gui, backend = self.shell.enable_matplotlib(args.gui)\n- self._show_matplotlib_backend(args.gui, backend)\n+ if args.list:\n+ backends_list = list(backends.keys())\n+ print(\"Available matplotlib backends: %s\" % backends_list)\n+ else:\n+ gui, backend = self.shell.enable_matplotlib(args.gui)\n+ self._show_matplotlib_backend(args.gui, backend)\n \n @skip_doctest\n @line_magic\n", "issue": "added %matplotlib --list so backends are easily visible\nlinked to #8444 adding --list argument to show the available backends\n\n", "before_files": [{"content": "\"\"\"Implementation of magic functions for matplotlib/pylab support.\n\"\"\"\nfrom __future__ import print_function\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 The IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Our own packages\nfrom traitlets.config.application import Application\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import Magics, magics_class, line_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.utils.warn import warn\nfrom IPython.core.pylabtools import backends\n\n#-----------------------------------------------------------------------------\n# Magic implementation classes\n#-----------------------------------------------------------------------------\n\nmagic_gui_arg = magic_arguments.argument(\n 'gui', nargs='?',\n help=\"\"\"Name of the matplotlib backend to use %s.\n If given, the corresponding matplotlib backend is used,\n otherwise it will be matplotlib's default\n (which you can set in your matplotlib config file).\n \"\"\" % str(tuple(sorted(backends.keys())))\n)\n\n\n@magics_class\nclass PylabMagics(Magics):\n \"\"\"Magics related to matplotlib's pylab support\"\"\"\n \n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_gui_arg\n def matplotlib(self, line=''):\n \"\"\"Set up matplotlib to work interactively.\n \n This function lets you activate matplotlib interactive support\n at any point during an IPython session. 
It does not import anything\n into the interactive namespace.\n \n If you are using the inline matplotlib backend in the IPython Notebook\n you can set which figure formats are enabled using the following::\n \n In [1]: from IPython.display import set_matplotlib_formats\n \n In [2]: set_matplotlib_formats('pdf', 'svg')\n\n The default for inline figures sets `bbox_inches` to 'tight'. This can\n cause discrepancies between the displayed image and the identical\n image created using `savefig`. This behavior can be disabled using the\n `%config` magic::\n \n In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}\n\n In addition, see the docstring of\n `IPython.display.set_matplotlib_formats` and\n `IPython.display.set_matplotlib_close` for more information on\n changing additional behaviors of the inline backend.\n\n Examples\n --------\n To enable the inline backend for usage with the IPython Notebook::\n \n In [1]: %matplotlib inline\n\n In this case, where the matplotlib default is TkAgg::\n\n In [2]: %matplotlib\n Using matplotlib backend: TkAgg\n\n But you can explicitly request a different GUI backend::\n\n In [3]: %matplotlib qt\n \"\"\"\n args = magic_arguments.parse_argstring(self.matplotlib, line)\n gui, backend = self.shell.enable_matplotlib(args.gui)\n self._show_matplotlib_backend(args.gui, backend)\n\n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument(\n '--no-import-all', action='store_true', default=None,\n help=\"\"\"Prevent IPython from performing ``import *`` into the interactive namespace.\n \n You can govern the default behavior of this flag with the\n InteractiveShellApp.pylab_import_all configurable.\n \"\"\"\n )\n @magic_gui_arg\n def pylab(self, line=''):\n \"\"\"Load numpy and matplotlib to work interactively.\n\n This function lets you activate pylab (matplotlib, numpy and\n interactive support) at any point during an IPython session.\n \n %pylab makes the following imports::\n \n import numpy\n import matplotlib\n from matplotlib import pylab, mlab, pyplot\n np = numpy\n plt = pyplot\n \n from IPython.display import display\n from IPython.core.pylabtools import figsize, getfigs\n \n from pylab import *\n from numpy import *\n\n If you pass `--no-import-all`, the last two `*` imports will be excluded.\n \n See the %matplotlib magic for more details about activating matplotlib\n without affecting the interactive namespace.\n \"\"\"\n args = magic_arguments.parse_argstring(self.pylab, line)\n if args.no_import_all is None:\n # get default from Application\n if Application.initialized():\n app = Application.instance()\n try:\n import_all = app.pylab_import_all\n except AttributeError:\n import_all = True\n else:\n # nothing specified, no app - default True\n import_all = True\n else:\n # invert no-import flag\n import_all = not args.no_import_all\n\n gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)\n self._show_matplotlib_backend(args.gui, backend)\n print (\"Populating the interactive namespace from numpy and matplotlib\")\n if clobbered:\n warn(\"pylab import has clobbered these variables: %s\" % clobbered +\n \"\\n`%matplotlib` prevents importing * from pylab and numpy\"\n )\n \n def _show_matplotlib_backend(self, gui, backend):\n \"\"\"show matplotlib message backend message\"\"\"\n if not gui or gui == 'auto':\n print(\"Using matplotlib backend: %s\" % backend)\n", "path": "IPython/core/magics/pylab.py"}], "after_files": [{"content": "\"\"\"Implementation of magic 
functions for matplotlib/pylab support.\n\"\"\"\nfrom __future__ import print_function\n#-----------------------------------------------------------------------------\n# Copyright (c) 2012 The IPython Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Our own packages\nfrom traitlets.config.application import Application\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import Magics, magics_class, line_magic\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.utils.warn import warn\nfrom IPython.core.pylabtools import backends\n\n#-----------------------------------------------------------------------------\n# Magic implementation classes\n#-----------------------------------------------------------------------------\n\nmagic_gui_arg = magic_arguments.argument(\n 'gui', nargs='?',\n help=\"\"\"Name of the matplotlib backend to use %s.\n If given, the corresponding matplotlib backend is used,\n otherwise it will be matplotlib's default\n (which you can set in your matplotlib config file).\n \"\"\" % str(tuple(sorted(backends.keys())))\n)\n\n\n@magics_class\nclass PylabMagics(Magics):\n \"\"\"Magics related to matplotlib's pylab support\"\"\"\n \n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('-l', '--list', action='store_true',\n help='Show available matplotlib backends')\n @magic_gui_arg\n def matplotlib(self, line=''):\n \"\"\"Set up matplotlib to work interactively.\n \n This function lets you activate matplotlib interactive support\n at any point during an IPython session. It does not import anything\n into the interactive namespace.\n \n If you are using the inline matplotlib backend in the IPython Notebook\n you can set which figure formats are enabled using the following::\n \n In [1]: from IPython.display import set_matplotlib_formats\n \n In [2]: set_matplotlib_formats('pdf', 'svg')\n\n The default for inline figures sets `bbox_inches` to 'tight'. This can\n cause discrepancies between the displayed image and the identical\n image created using `savefig`. 
This behavior can be disabled using the\n `%config` magic::\n \n In [3]: %config InlineBackend.print_figure_kwargs = {'bbox_inches':None}\n\n In addition, see the docstring of\n `IPython.display.set_matplotlib_formats` and\n `IPython.display.set_matplotlib_close` for more information on\n changing additional behaviors of the inline backend.\n\n Examples\n --------\n To enable the inline backend for usage with the IPython Notebook::\n \n In [1]: %matplotlib inline\n\n In this case, where the matplotlib default is TkAgg::\n\n In [2]: %matplotlib\n Using matplotlib backend: TkAgg\n\n But you can explicitly request a different GUI backend::\n\n In [3]: %matplotlib qt\n\n You can list the available backends using the -l/--list option\n\n In [4]: %matplotlib --list\n Available matplotlib backends: ['osx', 'qt4', 'qt5', 'gtk3', 'notebook', 'wx', 'qt', 'nbagg',\n 'gtk', 'tk', 'inline']\n \"\"\"\n args = magic_arguments.parse_argstring(self.matplotlib, line)\n if args.list:\n backends_list = list(backends.keys())\n print(\"Available matplotlib backends: %s\" % backends_list)\n else:\n gui, backend = self.shell.enable_matplotlib(args.gui)\n self._show_matplotlib_backend(args.gui, backend)\n\n @skip_doctest\n @line_magic\n @magic_arguments.magic_arguments()\n @magic_arguments.argument(\n '--no-import-all', action='store_true', default=None,\n help=\"\"\"Prevent IPython from performing ``import *`` into the interactive namespace.\n \n You can govern the default behavior of this flag with the\n InteractiveShellApp.pylab_import_all configurable.\n \"\"\"\n )\n @magic_gui_arg\n def pylab(self, line=''):\n \"\"\"Load numpy and matplotlib to work interactively.\n\n This function lets you activate pylab (matplotlib, numpy and\n interactive support) at any point during an IPython session.\n \n %pylab makes the following imports::\n \n import numpy\n import matplotlib\n from matplotlib import pylab, mlab, pyplot\n np = numpy\n plt = pyplot\n \n from IPython.display import display\n from IPython.core.pylabtools import figsize, getfigs\n \n from pylab import *\n from numpy import *\n\n If you pass `--no-import-all`, the last two `*` imports will be excluded.\n \n See the %matplotlib magic for more details about activating matplotlib\n without affecting the interactive namespace.\n \"\"\"\n args = magic_arguments.parse_argstring(self.pylab, line)\n if args.no_import_all is None:\n # get default from Application\n if Application.initialized():\n app = Application.instance()\n try:\n import_all = app.pylab_import_all\n except AttributeError:\n import_all = True\n else:\n # nothing specified, no app - default True\n import_all = True\n else:\n # invert no-import flag\n import_all = not args.no_import_all\n\n gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)\n self._show_matplotlib_backend(args.gui, backend)\n print (\"Populating the interactive namespace from numpy and matplotlib\")\n if clobbered:\n warn(\"pylab import has clobbered these variables: %s\" % clobbered +\n \"\\n`%matplotlib` prevents importing * from pylab and numpy\"\n )\n \n def _show_matplotlib_backend(self, gui, backend):\n \"\"\"show matplotlib message backend message\"\"\"\n if not gui or gui == 'auto':\n print(\"Using matplotlib backend: %s\" % backend)\n", "path": "IPython/core/magics/pylab.py"}]} | 1,806 | 361 |
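The patch adds an argparse-style `-l/--list` flag through IPython's `magic_arguments` decorators and branches on it before enabling a backend. The stand-alone sketch below mirrors that control flow with plain `argparse` so it can be run outside IPython; the `backends` mapping is a placeholder rather than the real `IPython.core.pylabtools.backends` dict.

```python
import argparse

backends = {"inline": None, "qt": None, "tk": None}  # placeholder backend table

parser = argparse.ArgumentParser(prog="%matplotlib")
parser.add_argument("-l", "--list", action="store_true",
                    help="Show available matplotlib backends")
parser.add_argument("gui", nargs="?", help="Name of the backend to enable")


def matplotlib_magic(line=""):
    args = parser.parse_args(line.split())
    if args.list:
        print("Available matplotlib backends: %s" % list(backends.keys()))
    else:
        print("Would enable backend: %s" % (args.gui or "<default>"))


matplotlib_magic("--list")   # prints the available backends
matplotlib_magic("qt")       # falls through to the enable path
```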
gh_patches_debug_16253 | rasdani/github-patches | git_diff | mozilla__pontoon-3030 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Broken heroku deploy button
When i try to deploy pontoon on heroku, i got an infinite charge..

I just clicked on button from README file and logged in to heroku platform
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/base/management/commands/heroku_deploy_setup.py`
Content:
```
1 import os
2
3 from urllib.parse import urlparse, urljoin
4
5 from django.core.management.base import BaseCommand
6 from django.contrib.sites.models import Site
7
8 from pontoon.base.models import Project, User
9
10
11 class Command(BaseCommand):
12 help = "Setup an instance of Pontoon deployed via Heroku Deploy."
13
14 def handle(self, *args, **options):
15 site_url = os.environ.get("SITE_URL")
16 app_host = urlparse(site_url).netloc
17 admin_email = os.environ.get("ADMIN_EMAIL")
18 admin_password = os.environ.get("ADMIN_PASSWORD")
19
20 User.objects.create_superuser(admin_email, admin_email, admin_password)
21 Site.objects.filter(pk=1).update(name=app_host, domain=app_host)
22
23 Project.objects.filter(slug="pontoon-intro").update(
24 url=urljoin(site_url, "intro/")
25 )
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/base/management/commands/heroku_deploy_setup.py b/pontoon/base/management/commands/heroku_deploy_setup.py
--- a/pontoon/base/management/commands/heroku_deploy_setup.py
+++ b/pontoon/base/management/commands/heroku_deploy_setup.py
@@ -1,11 +1,11 @@
import os
-from urllib.parse import urlparse, urljoin
+from urllib.parse import urlparse
from django.core.management.base import BaseCommand
from django.contrib.sites.models import Site
-from pontoon.base.models import Project, User
+from pontoon.base.models import User
class Command(BaseCommand):
@@ -19,7 +19,3 @@
User.objects.create_superuser(admin_email, admin_email, admin_password)
Site.objects.filter(pk=1).update(name=app_host, domain=app_host)
-
- Project.objects.filter(slug="pontoon-intro").update(
- url=urljoin(site_url, "intro/")
- )
| {"golden_diff": "diff --git a/pontoon/base/management/commands/heroku_deploy_setup.py b/pontoon/base/management/commands/heroku_deploy_setup.py\n--- a/pontoon/base/management/commands/heroku_deploy_setup.py\n+++ b/pontoon/base/management/commands/heroku_deploy_setup.py\n@@ -1,11 +1,11 @@\n import os\n \n-from urllib.parse import urlparse, urljoin\n+from urllib.parse import urlparse\n \n from django.core.management.base import BaseCommand\n from django.contrib.sites.models import Site\n \n-from pontoon.base.models import Project, User\n+from pontoon.base.models import User\n \n \n class Command(BaseCommand):\n@@ -19,7 +19,3 @@\n \n User.objects.create_superuser(admin_email, admin_email, admin_password)\n Site.objects.filter(pk=1).update(name=app_host, domain=app_host)\n-\n- Project.objects.filter(slug=\"pontoon-intro\").update(\n- url=urljoin(site_url, \"intro/\")\n- )\n", "issue": "Broken heroku deploy button\nWhen i try to deploy pontoon on heroku, i got an infinite charge..\r\n\r\n\r\nI just clicked on button from README file and logged in to heroku platform\n", "before_files": [{"content": "import os\n\nfrom urllib.parse import urlparse, urljoin\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.sites.models import Site\n\nfrom pontoon.base.models import Project, User\n\n\nclass Command(BaseCommand):\n help = \"Setup an instance of Pontoon deployed via Heroku Deploy.\"\n\n def handle(self, *args, **options):\n site_url = os.environ.get(\"SITE_URL\")\n app_host = urlparse(site_url).netloc\n admin_email = os.environ.get(\"ADMIN_EMAIL\")\n admin_password = os.environ.get(\"ADMIN_PASSWORD\")\n\n User.objects.create_superuser(admin_email, admin_email, admin_password)\n Site.objects.filter(pk=1).update(name=app_host, domain=app_host)\n\n Project.objects.filter(slug=\"pontoon-intro\").update(\n url=urljoin(site_url, \"intro/\")\n )\n", "path": "pontoon/base/management/commands/heroku_deploy_setup.py"}], "after_files": [{"content": "import os\n\nfrom urllib.parse import urlparse\n\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.sites.models import Site\n\nfrom pontoon.base.models import User\n\n\nclass Command(BaseCommand):\n help = \"Setup an instance of Pontoon deployed via Heroku Deploy.\"\n\n def handle(self, *args, **options):\n site_url = os.environ.get(\"SITE_URL\")\n app_host = urlparse(site_url).netloc\n admin_email = os.environ.get(\"ADMIN_EMAIL\")\n admin_password = os.environ.get(\"ADMIN_PASSWORD\")\n\n User.objects.create_superuser(admin_email, admin_email, admin_password)\n Site.objects.filter(pk=1).update(name=app_host, domain=app_host)\n", "path": "pontoon/base/management/commands/heroku_deploy_setup.py"}]} | 600 | 214 |
gh_patches_debug_13676 | rasdani/github-patches | git_diff | pulp__pulpcore-2408 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Migration 0040_set_admin_is_staff.py is missing dependency on user model
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pulpcore/app/migrations/0040_set_admin_is_staff.py`
Content:
```
1 # Generated by Django 2.2.13 on 2020-07-01 21:29
2
3 from django.contrib.auth import get_user_model
4 from django.db import migrations
5
6
7 def allow_admin_as_staff(apps, schema_editor):
8 user_model = get_user_model()
9 try:
10 admin_user = user_model.objects.get(username='admin')
11 except user_model.DoesNotExist:
12 pass
13 else:
14 admin_user.is_staff = True
15 admin_user.save()
16
17
18 class Migration(migrations.Migration):
19
20 dependencies = [
21 ('core', '0039_change_download_concurrency'),
22 ]
23
24 operations = [
25 migrations.RunPython(allow_admin_as_staff),
26 ]
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pulpcore/app/migrations/0040_set_admin_is_staff.py b/pulpcore/app/migrations/0040_set_admin_is_staff.py
--- a/pulpcore/app/migrations/0040_set_admin_is_staff.py
+++ b/pulpcore/app/migrations/0040_set_admin_is_staff.py
@@ -1,5 +1,6 @@
# Generated by Django 2.2.13 on 2020-07-01 21:29
+from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import migrations
@@ -19,8 +20,9 @@
dependencies = [
('core', '0039_change_download_concurrency'),
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
- migrations.RunPython(allow_admin_as_staff),
+ migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),
]
| {"golden_diff": "diff --git a/pulpcore/app/migrations/0040_set_admin_is_staff.py b/pulpcore/app/migrations/0040_set_admin_is_staff.py\n--- a/pulpcore/app/migrations/0040_set_admin_is_staff.py\n+++ b/pulpcore/app/migrations/0040_set_admin_is_staff.py\n@@ -1,5 +1,6 @@\n # Generated by Django 2.2.13 on 2020-07-01 21:29\n \n+from django.conf import settings\n from django.contrib.auth import get_user_model\n from django.db import migrations\n \n@@ -19,8 +20,9 @@\n \n dependencies = [\n ('core', '0039_change_download_concurrency'),\n+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n \n operations = [\n- migrations.RunPython(allow_admin_as_staff),\n+ migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),\n ]\n", "issue": "Migration 0040_set_admin_is_staff.py is missing dependency on user model\n\n", "before_files": [{"content": "# Generated by Django 2.2.13 on 2020-07-01 21:29\n\nfrom django.contrib.auth import get_user_model\nfrom django.db import migrations\n\n\ndef allow_admin_as_staff(apps, schema_editor):\n user_model = get_user_model()\n try:\n admin_user = user_model.objects.get(username='admin')\n except user_model.DoesNotExist:\n pass\n else:\n admin_user.is_staff = True\n admin_user.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0039_change_download_concurrency'),\n ]\n\n operations = [\n migrations.RunPython(allow_admin_as_staff),\n ]\n", "path": "pulpcore/app/migrations/0040_set_admin_is_staff.py"}], "after_files": [{"content": "# Generated by Django 2.2.13 on 2020-07-01 21:29\n\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.db import migrations\n\n\ndef allow_admin_as_staff(apps, schema_editor):\n user_model = get_user_model()\n try:\n admin_user = user_model.objects.get(username='admin')\n except user_model.DoesNotExist:\n pass\n else:\n admin_user.is_staff = True\n admin_user.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0039_change_download_concurrency'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.RunPython(allow_admin_as_staff, reverse_code=migrations.RunPython.noop, elidable=True),\n ]\n", "path": "pulpcore/app/migrations/0040_set_admin_is_staff.py"}]} | 487 | 224 |
gh_patches_debug_3692 | rasdani/github-patches | git_diff | nautobot__nautobot-566 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`nautobot.core.api.serializers.WritableNestedSerializer` docstring is incorrect and confusing
<!--
NOTE: This template is for use by maintainers only. Please do not submit
an issue using this template unless you have been specifically asked to
do so.
-->
### Proposed Changes
The docstring should read something like this:
> Returns a nested representation of an object on read, but accepts either the nested representation or just the PK value on write operations.
<!-- Provide justification for the proposed change(s). -->
### Justification
The current docstring is in conflict with the actual implementation by stating that only the PK value is supported on write, which is incorrect.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/core/api/serializers.py`
Content:
```
1 import uuid
2
3 from django.core.exceptions import (
4 FieldError,
5 MultipleObjectsReturned,
6 ObjectDoesNotExist,
7 )
8 from django.db.models import AutoField, ManyToManyField
9 from drf_yasg.utils import swagger_serializer_method
10 from rest_framework import serializers
11 from rest_framework.exceptions import ValidationError
12
13 from nautobot.utilities.utils import dict_to_filter_params
14
15
16 class BaseModelSerializer(serializers.ModelSerializer):
17 """
18 This base serializer implements common fields and logic for all ModelSerializers.
19 Namely it defines the `display` field which exposes a human friendly value for the given object.
20 """
21
22 display = serializers.SerializerMethodField(read_only=True, help_text="Human friendly display value")
23
24 @swagger_serializer_method(serializer_or_field=serializers.CharField)
25 def get_display(self, instance):
26 """
27 Return either the `display` property of the instance or `str(instance)`
28 """
29 return getattr(instance, "display", str(instance))
30
31 def get_field_names(self, declared_fields, info):
32 """
33 Override get_field_names() to append the `display` field so it is always included in the
34 serializer's `Meta.fields`.
35
36 DRF does not automatically add declared fields to `Meta.fields`, nor does it require that declared fields
37 on a super class be included in `Meta.fields` to allow for a subclass to include only a subset of declared
38 fields from the super. This means either we intercept and append the display field at this level, or
39 enforce by convention that all consumers of BaseModelSerializer include `display` in their `Meta.fields`
40 which would surely lead to errors of omission; therefore we have chosen the former approach.
41 """
42 fields = list(super().get_field_names(declared_fields, info)) # Meta.fields could be defined as a tuple
43 fields.append("display")
44
45 return fields
46
47
48 class ValidatedModelSerializer(BaseModelSerializer):
49 """
50 Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during
51 validation. (DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)
52 """
53
54 def validate(self, data):
55
56 # Remove custom fields data and tags (if any) prior to model validation
57 attrs = data.copy()
58 attrs.pop("custom_fields", None)
59 attrs.pop("tags", None)
60
61 # Skip ManyToManyFields
62 for field in self.Meta.model._meta.get_fields():
63 if isinstance(field, ManyToManyField):
64 attrs.pop(field.name, None)
65
66 # Run clean() on an instance of the model
67 if self.instance is None:
68 instance = self.Meta.model(**attrs)
69 else:
70 instance = self.instance
71 for k, v in attrs.items():
72 setattr(instance, k, v)
73 instance.full_clean()
74
75 return data
76
77
78 class WritableNestedSerializer(BaseModelSerializer):
79 """
80 Returns a nested representation of an object on read, but accepts only a primary key on write.
81 """
82
83 def to_internal_value(self, data):
84
85 if data is None:
86 return None
87
88 # Dictionary of related object attributes
89 if isinstance(data, dict):
90 params = dict_to_filter_params(data)
91 queryset = self.Meta.model.objects
92 try:
93 return queryset.get(**params)
94 except ObjectDoesNotExist:
95 raise ValidationError("Related object not found using the provided attributes: {}".format(params))
96 except MultipleObjectsReturned:
97 raise ValidationError("Multiple objects match the provided attributes: {}".format(params))
98 except FieldError as e:
99 raise ValidationError(e)
100
101 queryset = self.Meta.model.objects
102 pk = None
103
104 if isinstance(self.Meta.model._meta.pk, AutoField):
105 # PK is an int for this model. This is usually the User model
106 try:
107 pk = int(data)
108 except (TypeError, ValueError):
109 raise ValidationError(
110 "Related objects must be referenced by ID or by dictionary of attributes. Received an "
111 "unrecognized value: {}".format(data)
112 )
113
114 else:
115 # We assume a type of UUIDField for all other models
116
117 # PK of related object
118 try:
119 # Ensure the pk is a valid UUID
120 pk = uuid.UUID(str(data))
121 except (TypeError, ValueError):
122 raise ValidationError(
123 "Related objects must be referenced by ID or by dictionary of attributes. Received an "
124 "unrecognized value: {}".format(data)
125 )
126
127 try:
128 return queryset.get(pk=pk)
129 except ObjectDoesNotExist:
130 raise ValidationError("Related object not found using the provided ID: {}".format(pk))
131
132
133 class BulkOperationSerializer(serializers.Serializer):
134 id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model
135
136
137 #
138 # GraphQL, used by the openapi doc, not by the view
139 #
140
141
142 class GraphQLAPISerializer(serializers.Serializer):
143 query = serializers.CharField(required=True, help_text="GraphQL query")
144 variables = serializers.JSONField(required=False, help_text="Variables in JSON Format")
145
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/core/api/serializers.py b/nautobot/core/api/serializers.py
--- a/nautobot/core/api/serializers.py
+++ b/nautobot/core/api/serializers.py
@@ -77,7 +77,8 @@
class WritableNestedSerializer(BaseModelSerializer):
"""
- Returns a nested representation of an object on read, but accepts only a primary key on write.
+ Returns a nested representation of an object on read, but accepts either the nested representation or the
+ primary key value on write operations.
"""
def to_internal_value(self, data):
| {"golden_diff": "diff --git a/nautobot/core/api/serializers.py b/nautobot/core/api/serializers.py\n--- a/nautobot/core/api/serializers.py\n+++ b/nautobot/core/api/serializers.py\n@@ -77,7 +77,8 @@\n \n class WritableNestedSerializer(BaseModelSerializer):\n \"\"\"\n- Returns a nested representation of an object on read, but accepts only a primary key on write.\n+ Returns a nested representation of an object on read, but accepts either the nested representation or the\n+ primary key value on write operations.\n \"\"\"\n \n def to_internal_value(self, data):\n", "issue": "`nautobot.core.api.serializers.WritableNestedSerializer` docstring is incorrect and confusing\n<!--\r\n NOTE: This template is for use by maintainers only. Please do not submit\r\n an issue using this template unless you have been specifically asked to\r\n do so.\r\n-->\r\n### Proposed Changes\r\nThe docstring should read something like this:\r\n> Returns a nested representation of an object on read, but accepts either the nested representation or just the PK value on write operations.\r\n\r\n<!-- Provide justification for the proposed change(s). -->\r\n### Justification\r\nThe current docstring is in conflict with the actual implementation by stating that only the PK value is supported on write, which is incorrect.\n", "before_files": [{"content": "import uuid\n\nfrom django.core.exceptions import (\n FieldError,\n MultipleObjectsReturned,\n ObjectDoesNotExist,\n)\nfrom django.db.models import AutoField, ManyToManyField\nfrom drf_yasg.utils import swagger_serializer_method\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom nautobot.utilities.utils import dict_to_filter_params\n\n\nclass BaseModelSerializer(serializers.ModelSerializer):\n \"\"\"\n This base serializer implements common fields and logic for all ModelSerializers.\n Namely it defines the `display` field which exposes a human friendly value for the given object.\n \"\"\"\n\n display = serializers.SerializerMethodField(read_only=True, help_text=\"Human friendly display value\")\n\n @swagger_serializer_method(serializer_or_field=serializers.CharField)\n def get_display(self, instance):\n \"\"\"\n Return either the `display` property of the instance or `str(instance)`\n \"\"\"\n return getattr(instance, \"display\", str(instance))\n\n def get_field_names(self, declared_fields, info):\n \"\"\"\n Override get_field_names() to append the `display` field so it is always included in the\n serializer's `Meta.fields`.\n\n DRF does not automatically add declared fields to `Meta.fields`, nor does it require that declared fields\n on a super class be included in `Meta.fields` to allow for a subclass to include only a subset of declared\n fields from the super. This means either we intercept and append the display field at this level, or\n enforce by convention that all consumers of BaseModelSerializer include `display` in their `Meta.fields`\n which would surely lead to errors of omission; therefore we have chosen the former approach.\n \"\"\"\n fields = list(super().get_field_names(declared_fields, info)) # Meta.fields could be defined as a tuple\n fields.append(\"display\")\n\n return fields\n\n\nclass ValidatedModelSerializer(BaseModelSerializer):\n \"\"\"\n Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during\n validation. 
(DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)\n \"\"\"\n\n def validate(self, data):\n\n # Remove custom fields data and tags (if any) prior to model validation\n attrs = data.copy()\n attrs.pop(\"custom_fields\", None)\n attrs.pop(\"tags\", None)\n\n # Skip ManyToManyFields\n for field in self.Meta.model._meta.get_fields():\n if isinstance(field, ManyToManyField):\n attrs.pop(field.name, None)\n\n # Run clean() on an instance of the model\n if self.instance is None:\n instance = self.Meta.model(**attrs)\n else:\n instance = self.instance\n for k, v in attrs.items():\n setattr(instance, k, v)\n instance.full_clean()\n\n return data\n\n\nclass WritableNestedSerializer(BaseModelSerializer):\n \"\"\"\n Returns a nested representation of an object on read, but accepts only a primary key on write.\n \"\"\"\n\n def to_internal_value(self, data):\n\n if data is None:\n return None\n\n # Dictionary of related object attributes\n if isinstance(data, dict):\n params = dict_to_filter_params(data)\n queryset = self.Meta.model.objects\n try:\n return queryset.get(**params)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided attributes: {}\".format(params))\n except MultipleObjectsReturned:\n raise ValidationError(\"Multiple objects match the provided attributes: {}\".format(params))\n except FieldError as e:\n raise ValidationError(e)\n\n queryset = self.Meta.model.objects\n pk = None\n\n if isinstance(self.Meta.model._meta.pk, AutoField):\n # PK is an int for this model. This is usually the User model\n try:\n pk = int(data)\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n else:\n # We assume a type of UUIDField for all other models\n\n # PK of related object\n try:\n # Ensure the pk is a valid UUID\n pk = uuid.UUID(str(data))\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. 
Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n try:\n return queryset.get(pk=pk)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided ID: {}\".format(pk))\n\n\nclass BulkOperationSerializer(serializers.Serializer):\n id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model\n\n\n#\n# GraphQL, used by the openapi doc, not by the view\n#\n\n\nclass GraphQLAPISerializer(serializers.Serializer):\n query = serializers.CharField(required=True, help_text=\"GraphQL query\")\n variables = serializers.JSONField(required=False, help_text=\"Variables in JSON Format\")\n", "path": "nautobot/core/api/serializers.py"}], "after_files": [{"content": "import uuid\n\nfrom django.core.exceptions import (\n FieldError,\n MultipleObjectsReturned,\n ObjectDoesNotExist,\n)\nfrom django.db.models import AutoField, ManyToManyField\nfrom drf_yasg.utils import swagger_serializer_method\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom nautobot.utilities.utils import dict_to_filter_params\n\n\nclass BaseModelSerializer(serializers.ModelSerializer):\n \"\"\"\n This base serializer implements common fields and logic for all ModelSerializers.\n Namely it defines the `display` field which exposes a human friendly value for the given object.\n \"\"\"\n\n display = serializers.SerializerMethodField(read_only=True, help_text=\"Human friendly display value\")\n\n @swagger_serializer_method(serializer_or_field=serializers.CharField)\n def get_display(self, instance):\n \"\"\"\n Return either the `display` property of the instance or `str(instance)`\n \"\"\"\n return getattr(instance, \"display\", str(instance))\n\n def get_field_names(self, declared_fields, info):\n \"\"\"\n Override get_field_names() to append the `display` field so it is always included in the\n serializer's `Meta.fields`.\n\n DRF does not automatically add declared fields to `Meta.fields`, nor does it require that declared fields\n on a super class be included in `Meta.fields` to allow for a subclass to include only a subset of declared\n fields from the super. This means either we intercept and append the display field at this level, or\n enforce by convention that all consumers of BaseModelSerializer include `display` in their `Meta.fields`\n which would surely lead to errors of omission; therefore we have chosen the former approach.\n \"\"\"\n fields = list(super().get_field_names(declared_fields, info)) # Meta.fields could be defined as a tuple\n fields.append(\"display\")\n\n return fields\n\n\nclass ValidatedModelSerializer(BaseModelSerializer):\n \"\"\"\n Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during\n validation. 
(DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)\n \"\"\"\n\n def validate(self, data):\n\n # Remove custom fields data and tags (if any) prior to model validation\n attrs = data.copy()\n attrs.pop(\"custom_fields\", None)\n attrs.pop(\"tags\", None)\n\n # Skip ManyToManyFields\n for field in self.Meta.model._meta.get_fields():\n if isinstance(field, ManyToManyField):\n attrs.pop(field.name, None)\n\n # Run clean() on an instance of the model\n if self.instance is None:\n instance = self.Meta.model(**attrs)\n else:\n instance = self.instance\n for k, v in attrs.items():\n setattr(instance, k, v)\n instance.full_clean()\n\n return data\n\n\nclass WritableNestedSerializer(BaseModelSerializer):\n \"\"\"\n Returns a nested representation of an object on read, but accepts either the nested representation or the\n primary key value on write operations.\n \"\"\"\n\n def to_internal_value(self, data):\n\n if data is None:\n return None\n\n # Dictionary of related object attributes\n if isinstance(data, dict):\n params = dict_to_filter_params(data)\n queryset = self.Meta.model.objects\n try:\n return queryset.get(**params)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided attributes: {}\".format(params))\n except MultipleObjectsReturned:\n raise ValidationError(\"Multiple objects match the provided attributes: {}\".format(params))\n except FieldError as e:\n raise ValidationError(e)\n\n queryset = self.Meta.model.objects\n pk = None\n\n if isinstance(self.Meta.model._meta.pk, AutoField):\n # PK is an int for this model. This is usually the User model\n try:\n pk = int(data)\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n else:\n # We assume a type of UUIDField for all other models\n\n # PK of related object\n try:\n # Ensure the pk is a valid UUID\n pk = uuid.UUID(str(data))\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n try:\n return queryset.get(pk=pk)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided ID: {}\".format(pk))\n\n\nclass BulkOperationSerializer(serializers.Serializer):\n id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model\n\n\n#\n# GraphQL, used by the openapi doc, not by the view\n#\n\n\nclass GraphQLAPISerializer(serializers.Serializer):\n query = serializers.CharField(required=True, help_text=\"GraphQL query\")\n variables = serializers.JSONField(required=False, help_text=\"Variables in JSON Format\")\n", "path": "nautobot/core/api/serializers.py"}]} | 1,798 | 135 |
gh_patches_debug_4 | rasdani/github-patches | git_diff | horovod__horovod-1139 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Replace .step(synchronize=False) with optimizer.skip_synchronize()
NVIDIA AMP does not support passing additional flags to `optimizer.step()`, such as `optimizer.step(synchronize=False)`.
This PR switches API to use context manager:
```python
optimizer.synchronize()
with optimizer.skip_synchronize():
optimizer.step()
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `horovod/__init__.py`
Content:
```
1 __version__ = '0.16.3'
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/horovod/__init__.py b/horovod/__init__.py
--- a/horovod/__init__.py
+++ b/horovod/__init__.py
@@ -1 +1 @@
-__version__ = '0.16.3'
+__version__ = '0.16.4'
| {"golden_diff": "diff --git a/horovod/__init__.py b/horovod/__init__.py\n--- a/horovod/__init__.py\n+++ b/horovod/__init__.py\n@@ -1 +1 @@\n-__version__ = '0.16.3'\n+__version__ = '0.16.4'\n", "issue": "Replace .step(synchronize=False) with optimizer.skip_synchronize()\nNVIDIA AMP does not support passing additional flags to `optimizer.step()`, such as `optimizer.step(synchronize=False)`.\r\n\r\nThis PR switches API to use context manager:\r\n```python\r\noptimizer.synchronize()\r\nwith optimizer.skip_synchronize():\r\n optimizer.step()\r\n```\n", "before_files": [{"content": "__version__ = '0.16.3'\n", "path": "horovod/__init__.py"}], "after_files": [{"content": "__version__ = '0.16.4'\n", "path": "horovod/__init__.py"}]} | 341 | 76 |
gh_patches_debug_19971 | rasdani/github-patches | git_diff | vispy__vispy-476 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Adding more documentation
Currently, we only have the API reference. There's no other documentation at the moment.
Here are a few references we could take inspiration from/copy.
- [@rougier's tutorial](http://www.loria.fr/~rougier/teaching/opengl/)
- Recipe from the IPython Cookbook (link coming soon)
- [Paper in Frontiers in Neuroinformatics](http://journal.frontiersin.org/Journal/10.3389/fninf.2013.00036/full)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 """
6
7 =====
8 Vispy
9 =====
10
11 Vispy is a collaborative project that has the goal to allow more sharing
12 of code between visualization projects based on OpenGL. It does this
13 by providing powerful interfaces to OpenGL, at different levels of
14 abstraction and generality.
15
16 Vispy consists of the following modules:
17 * vispy.app: for creating windows, timers and mainloops for various backends
18 * vispy.gloo: Object oriented GL API
19 * vispy.gloo.gl: Low level OpenGL API
20 * vispy.util: various utilities
21 * vispy.scene: Higher level visualization objects (work in progress)
22 * vispy.mpl_plot: matplotlib interface (work in progress)
23 * ... more to come
24
25 Vispy comes with a powerful event system and a simple application
26 framework that works on multiple backends. This allows easy creation
27 of figures, and enables integrating visualizations in a GUI application.
28
29 For more information see http://vispy.org.
30 """
31
32 from __future__ import division
33
34 __all__ = ['use', 'sys_info', 'set_log_level', 'test']
35
36 # Definition of the version number
37 __version__ = '0.3'
38
39
40 from .util import (_parse_command_line_arguments, config, # noqa
41 set_log_level, keys, sys_info, test) # noqa
42 from .util.wrappers import use # noqa
43
44 _parse_command_line_arguments()
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vispy/__init__.py b/vispy/__init__.py
--- a/vispy/__init__.py
+++ b/vispy/__init__.py
@@ -8,25 +8,13 @@
Vispy
=====
-Vispy is a collaborative project that has the goal to allow more sharing
-of code between visualization projects based on OpenGL. It does this
-by providing powerful interfaces to OpenGL, at different levels of
-abstraction and generality.
-
-Vispy consists of the following modules:
- * vispy.app: for creating windows, timers and mainloops for various backends
- * vispy.gloo: Object oriented GL API
- * vispy.gloo.gl: Low level OpenGL API
- * vispy.util: various utilities
- * vispy.scene: Higher level visualization objects (work in progress)
- * vispy.mpl_plot: matplotlib interface (work in progress)
- * ... more to come
-
-Vispy comes with a powerful event system and a simple application
-framework that works on multiple backends. This allows easy creation
-of figures, and enables integrating visualizations in a GUI application.
-
-For more information see http://vispy.org.
+Vispy is a **high-performance interactive 2D/3D data visualization
+library**. Vispy leverages the computational power of modern **Graphics
+Processing Units (GPUs)** through the **OpenGL** library to display very
+large datasets.
+
+For more information, see http://vispy.org.
+
"""
from __future__ import division
| {"golden_diff": "diff --git a/vispy/__init__.py b/vispy/__init__.py\n--- a/vispy/__init__.py\n+++ b/vispy/__init__.py\n@@ -8,25 +8,13 @@\n Vispy\n =====\n \n-Vispy is a collaborative project that has the goal to allow more sharing\n-of code between visualization projects based on OpenGL. It does this\n-by providing powerful interfaces to OpenGL, at different levels of\n-abstraction and generality.\n-\n-Vispy consists of the following modules:\n- * vispy.app: for creating windows, timers and mainloops for various backends\n- * vispy.gloo: Object oriented GL API\n- * vispy.gloo.gl: Low level OpenGL API\n- * vispy.util: various utilities\n- * vispy.scene: Higher level visualization objects (work in progress)\n- * vispy.mpl_plot: matplotlib interface (work in progress)\n- * ... more to come\n-\n-Vispy comes with a powerful event system and a simple application\n-framework that works on multiple backends. This allows easy creation\n-of figures, and enables integrating visualizations in a GUI application.\n-\n-For more information see http://vispy.org.\n+Vispy is a **high-performance interactive 2D/3D data visualization\n+library**. Vispy leverages the computational power of modern **Graphics\n+Processing Units (GPUs)** through the **OpenGL** library to display very\n+large datasets.\n+\n+For more information, see http://vispy.org.\n+\n \"\"\"\n \n from __future__ import division\n", "issue": "Adding more documentation\nCurrently, we only have the API reference. There's no other documentation at the moment.\n\nHere are a few references we could take inspiration from/copy.\n- [@rougier's tutorial](http://www.loria.fr/~rougier/teaching/opengl/)\n- Recipe from the IPython Cookbook (link coming soon)\n- [Paper in Frontiers in Neuroinformatics](http://journal.frontiersin.org/Journal/10.3389/fninf.2013.00036/full)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\"\n\n=====\nVispy\n=====\n\nVispy is a collaborative project that has the goal to allow more sharing\nof code between visualization projects based on OpenGL. It does this\nby providing powerful interfaces to OpenGL, at different levels of\nabstraction and generality.\n\nVispy consists of the following modules:\n * vispy.app: for creating windows, timers and mainloops for various backends\n * vispy.gloo: Object oriented GL API\n * vispy.gloo.gl: Low level OpenGL API\n * vispy.util: various utilities\n * vispy.scene: Higher level visualization objects (work in progress)\n * vispy.mpl_plot: matplotlib interface (work in progress)\n * ... more to come\n\nVispy comes with a powerful event system and a simple application\nframework that works on multiple backends. This allows easy creation\nof figures, and enables integrating visualizations in a GUI application.\n\nFor more information see http://vispy.org.\n\"\"\"\n\nfrom __future__ import division\n\n__all__ = ['use', 'sys_info', 'set_log_level', 'test']\n\n# Definition of the version number\n__version__ = '0.3'\n\n\nfrom .util import (_parse_command_line_arguments, config, # noqa\n set_log_level, keys, sys_info, test) # noqa\nfrom .util.wrappers import use # noqa\n\n_parse_command_line_arguments()\n", "path": "vispy/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, Vispy Development Team.\n# Distributed under the (new) BSD License. 
See LICENSE.txt for more info.\n\n\"\"\"\n\n=====\nVispy\n=====\n\nVispy is a **high-performance interactive 2D/3D data visualization\nlibrary**. Vispy leverages the computational power of modern **Graphics\nProcessing Units (GPUs)** through the **OpenGL** library to display very\nlarge datasets.\n\nFor more information, see http://vispy.org.\n\n\"\"\"\n\nfrom __future__ import division\n\n__all__ = ['use', 'sys_info', 'set_log_level', 'test']\n\n# Definition of the version number\n__version__ = '0.3'\n\n\nfrom .util import (_parse_command_line_arguments, config, # noqa\n set_log_level, keys, sys_info, test) # noqa\nfrom .util.wrappers import use # noqa\n\n_parse_command_line_arguments()\n", "path": "vispy/__init__.py"}]} | 800 | 343 |
gh_patches_debug_11619 | rasdani/github-patches | git_diff | cloudtools__troposphere-440 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Function Role property doesn't work with string value
The following results in a TypeError for the Role property type. I don't get a TypeError if I use GetAtt for Role's type, but I need to use a string. Looks like an issue in which the expected type is a list of possible types, but the validation on types then thinks the value should be a list instead of being one of the types in the list.
```
code = Code(
S3Bucket=self.s3Bucket,
S3Key=self.s3Key
)
func = Function(
"title",
Code=code,
Handler="index.handler",
Role="role-arn-here",
Runtime="nodejs",
)
```
```
File "C:\Users\bmorin\AppData\Local\Programs\Python\Python35\lib\site-packages\troposphere\__init__.py", line 66, in __init__
self.__setattr__(k, v)
File "C:\Users\bmorin\AppData\Local\Programs\Python\Python35\lib\site-packages\troposphere\__init__.py", line 110, in __setattr__
self._raise_type(name, value, expected_type)
File "C:\Users\bmorin\AppData\Local\Programs\Python\Python35\lib\site-packages\troposphere\__init__.py", line 145, in _raise_type
expected_type))
TypeError: <class 'troposphere.awslambda.Function'>: EventssomeFunction.Role is <class 'str'>, expected [<class 'str'>, <class 'troposphere.GetAtt'>]
```
Suspected issue spot in `__init__.py:`
```
# If we're expecting a list, then make sure it is a list
if not isinstance(value, list):
self._raise_type(name, value, expected_type)
```
And the definition of Role in `awslambda.py:`
```
class Function(AWSObject):
resource_type = "AWS::Lambda::Function"
props = {
'Code': (Code, True),
'Description': (str, False),
'Handler': (str, True),
'MemorySize': (positive_integer, False),
'Role': ([str, GetAtt], True),
'Runtime': (str, True),
'Timeout': (positive_integer, False),
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `troposphere/awslambda.py`
Content:
```
1 from . import AWSObject, AWSProperty, GetAtt
2 from .validators import positive_integer
3
4
5 class Code(AWSProperty):
6 props = {
7 'S3Bucket': (basestring, False),
8 'S3Key': (basestring, False),
9 'S3ObjectVersion': (basestring, False),
10 'ZipFile': (basestring, False)
11 }
12
13 def validate(self):
14 zip_file = self.properties.get('ZipFile')
15 s3_bucket = self.properties.get('S3Bucket')
16 s3_key = self.properties.get('S3Key')
17 s3_object_version = self.properties.get('SS3ObjectVersion')
18
19 if zip_file and s3_bucket:
20 raise ValueError("You can't specify both 'S3Bucket' and 'ZipFile'")
21 if zip_file and s3_key:
22 raise ValueError("You can't specify both 'S3Key' and 'ZipFile'")
23 if zip_file and s3_object_version:
24 raise ValueError(
25 "You can't specify both 'S3ObjectVersion' and 'ZipFile'"
26 )
27 if not zip_file and not (s3_bucket and s3_key):
28 raise ValueError(
29 "You must specify a bucket location (both the 'S3Bucket' and "
30 "'S3Key' properties) or the 'ZipFile' property"
31 )
32
33
34 class EventSourceMapping(AWSObject):
35 resource_type = "AWS::Lambda::EventSourceMapping"
36
37 props = {
38 'BatchSize': (positive_integer, False),
39 'Enabled': (bool, False),
40 'EventSourceArn': (basestring, True),
41 'FunctionName': (basestring, True),
42 'StartingPosition': (basestring, True),
43 }
44
45
46 class Function(AWSObject):
47 resource_type = "AWS::Lambda::Function"
48
49 props = {
50 'Code': (Code, True),
51 'Description': (basestring, False),
52 'Handler': (basestring, True),
53 'MemorySize': (positive_integer, False),
54 'Role': ([basestring, GetAtt], True),
55 'Runtime': (basestring, True),
56 'Timeout': (positive_integer, False),
57 }
58
59
60 class Permission(AWSObject):
61 resource_type = "AWS::Lambda::Permission"
62
63 props = {
64 'Action': (basestring, True),
65 'FunctionName': (basestring, True),
66 'Principal': (basestring, True),
67 'SourceAccount': (basestring, False),
68 'SourceArn': (basestring, False),
69 }
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/troposphere/awslambda.py b/troposphere/awslambda.py
--- a/troposphere/awslambda.py
+++ b/troposphere/awslambda.py
@@ -1,4 +1,4 @@
-from . import AWSObject, AWSProperty, GetAtt
+from . import AWSObject, AWSProperty
from .validators import positive_integer
@@ -51,7 +51,7 @@
'Description': (basestring, False),
'Handler': (basestring, True),
'MemorySize': (positive_integer, False),
- 'Role': ([basestring, GetAtt], True),
+ 'Role': (basestring, True),
'Runtime': (basestring, True),
'Timeout': (positive_integer, False),
}
| {"golden_diff": "diff --git a/troposphere/awslambda.py b/troposphere/awslambda.py\n--- a/troposphere/awslambda.py\n+++ b/troposphere/awslambda.py\n@@ -1,4 +1,4 @@\n-from . import AWSObject, AWSProperty, GetAtt\n+from . import AWSObject, AWSProperty\n from .validators import positive_integer\n \n \n@@ -51,7 +51,7 @@\n 'Description': (basestring, False),\n 'Handler': (basestring, True),\n 'MemorySize': (positive_integer, False),\n- 'Role': ([basestring, GetAtt], True),\n+ 'Role': (basestring, True),\n 'Runtime': (basestring, True),\n 'Timeout': (positive_integer, False),\n }\n", "issue": "Function Role property doesn't work with string value\nThe following results in a TypeError for the Role property type. I don't get a TypeError if I use GetAtt for Role's type, but I need to use a string. Looks like an issue in which the expected type is a list of possible types, but the validation on types then thinks the value should be a list instead of being one of the types in the list.\n\n```\ncode = Code(\n S3Bucket=self.s3Bucket,\n S3Key=self.s3Key\n )\n\nfunc = Function(\n \"title\",\n Code=code,\n Handler=\"index.handler\",\n Role=\"role-arn-here\",\n Runtime=\"nodejs\",\n )\n```\n\n```\n File \"C:\\Users\\bmorin\\AppData\\Local\\Programs\\Python\\Python35\\lib\\site-packages\\troposphere\\__init__.py\", line 66, in __init__\n self.__setattr__(k, v)\n File \"C:\\Users\\bmorin\\AppData\\Local\\Programs\\Python\\Python35\\lib\\site-packages\\troposphere\\__init__.py\", line 110, in __setattr__\n self._raise_type(name, value, expected_type)\n File \"C:\\Users\\bmorin\\AppData\\Local\\Programs\\Python\\Python35\\lib\\site-packages\\troposphere\\__init__.py\", line 145, in _raise_type\n expected_type))\nTypeError: <class 'troposphere.awslambda.Function'>: EventssomeFunction.Role is <class 'str'>, expected [<class 'str'>, <class 'troposphere.GetAtt'>]\n```\n\nSuspected issue spot in `__init__.py:`\n\n```\n# If we're expecting a list, then make sure it is a list\nif not isinstance(value, list):\n self._raise_type(name, value, expected_type)\n```\n\nAnd the definition of Role in `awslambda.py:`\n\n```\nclass Function(AWSObject):\n resource_type = \"AWS::Lambda::Function\"\n\n props = {\n 'Code': (Code, True),\n 'Description': (str, False),\n 'Handler': (str, True),\n 'MemorySize': (positive_integer, False),\n 'Role': ([str, GetAtt], True),\n 'Runtime': (str, True),\n 'Timeout': (positive_integer, False),\n }\n```\n\n", "before_files": [{"content": "from . 
import AWSObject, AWSProperty, GetAtt\nfrom .validators import positive_integer\n\n\nclass Code(AWSProperty):\n props = {\n 'S3Bucket': (basestring, False),\n 'S3Key': (basestring, False),\n 'S3ObjectVersion': (basestring, False),\n 'ZipFile': (basestring, False)\n }\n\n def validate(self):\n zip_file = self.properties.get('ZipFile')\n s3_bucket = self.properties.get('S3Bucket')\n s3_key = self.properties.get('S3Key')\n s3_object_version = self.properties.get('SS3ObjectVersion')\n\n if zip_file and s3_bucket:\n raise ValueError(\"You can't specify both 'S3Bucket' and 'ZipFile'\")\n if zip_file and s3_key:\n raise ValueError(\"You can't specify both 'S3Key' and 'ZipFile'\")\n if zip_file and s3_object_version:\n raise ValueError(\n \"You can't specify both 'S3ObjectVersion' and 'ZipFile'\"\n )\n if not zip_file and not (s3_bucket and s3_key):\n raise ValueError(\n \"You must specify a bucket location (both the 'S3Bucket' and \"\n \"'S3Key' properties) or the 'ZipFile' property\"\n )\n\n\nclass EventSourceMapping(AWSObject):\n resource_type = \"AWS::Lambda::EventSourceMapping\"\n\n props = {\n 'BatchSize': (positive_integer, False),\n 'Enabled': (bool, False),\n 'EventSourceArn': (basestring, True),\n 'FunctionName': (basestring, True),\n 'StartingPosition': (basestring, True),\n }\n\n\nclass Function(AWSObject):\n resource_type = \"AWS::Lambda::Function\"\n\n props = {\n 'Code': (Code, True),\n 'Description': (basestring, False),\n 'Handler': (basestring, True),\n 'MemorySize': (positive_integer, False),\n 'Role': ([basestring, GetAtt], True),\n 'Runtime': (basestring, True),\n 'Timeout': (positive_integer, False),\n }\n\n\nclass Permission(AWSObject):\n resource_type = \"AWS::Lambda::Permission\"\n\n props = {\n 'Action': (basestring, True),\n 'FunctionName': (basestring, True),\n 'Principal': (basestring, True),\n 'SourceAccount': (basestring, False),\n 'SourceArn': (basestring, False),\n }\n", "path": "troposphere/awslambda.py"}], "after_files": [{"content": "from . 
import AWSObject, AWSProperty\nfrom .validators import positive_integer\n\n\nclass Code(AWSProperty):\n props = {\n 'S3Bucket': (basestring, False),\n 'S3Key': (basestring, False),\n 'S3ObjectVersion': (basestring, False),\n 'ZipFile': (basestring, False)\n }\n\n def validate(self):\n zip_file = self.properties.get('ZipFile')\n s3_bucket = self.properties.get('S3Bucket')\n s3_key = self.properties.get('S3Key')\n s3_object_version = self.properties.get('SS3ObjectVersion')\n\n if zip_file and s3_bucket:\n raise ValueError(\"You can't specify both 'S3Bucket' and 'ZipFile'\")\n if zip_file and s3_key:\n raise ValueError(\"You can't specify both 'S3Key' and 'ZipFile'\")\n if zip_file and s3_object_version:\n raise ValueError(\n \"You can't specify both 'S3ObjectVersion' and 'ZipFile'\"\n )\n if not zip_file and not (s3_bucket and s3_key):\n raise ValueError(\n \"You must specify a bucket location (both the 'S3Bucket' and \"\n \"'S3Key' properties) or the 'ZipFile' property\"\n )\n\n\nclass EventSourceMapping(AWSObject):\n resource_type = \"AWS::Lambda::EventSourceMapping\"\n\n props = {\n 'BatchSize': (positive_integer, False),\n 'Enabled': (bool, False),\n 'EventSourceArn': (basestring, True),\n 'FunctionName': (basestring, True),\n 'StartingPosition': (basestring, True),\n }\n\n\nclass Function(AWSObject):\n resource_type = \"AWS::Lambda::Function\"\n\n props = {\n 'Code': (Code, True),\n 'Description': (basestring, False),\n 'Handler': (basestring, True),\n 'MemorySize': (positive_integer, False),\n 'Role': (basestring, True),\n 'Runtime': (basestring, True),\n 'Timeout': (positive_integer, False),\n }\n\n\nclass Permission(AWSObject):\n resource_type = \"AWS::Lambda::Permission\"\n\n props = {\n 'Action': (basestring, True),\n 'FunctionName': (basestring, True),\n 'Principal': (basestring, True),\n 'SourceAccount': (basestring, False),\n 'SourceArn': (basestring, False),\n }\n", "path": "troposphere/awslambda.py"}]} | 1,461 | 174 |
gh_patches_debug_27866 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1247 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Segfault when exiting Command Line Usage Example
<!-- In the following, please describe your issue in detail! -->
<!-- If some of the sections do not apply, just remove them. -->
### Short description
When Running the command line example, and closing the window, I get a segmentation fault
### Code to reproduce
<!-- Please provide a minimal working example that reproduces the issue in the code block below.
Ideally, this should be a full example someone else could run without additional setup. -->
```bash
python -m pyqtgraph.examples
```
Run the command line usage example, and close out the window.
### Expected behavior
Window closes
### Real behavior
Window closes, generates a segfault
```
/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py:125: RuntimeWarning: 'pyqtgraph.examples.__main__' found in sys.modules after import of package 'pyqtgraph.examples', but prior to execution of 'pyqtgraph.examples.__main__'; this may result in unpredictable behaviour
warn(RuntimeWarning(msg))
qt.qpa.fonts: Populating font family aliases took 246 ms. Replace uses of missing font family "FreeMono" with one that exists to avoid this cost.
Using PyQt5 (default graphics system)
Fatal Python error: Segmentation fault
Current thread 0x000000010840bdc0 (most recent call first):
File "/Users/ognyan/Developer/pyqtgraph/.venv/lib/python3.7/site-packages/pyqtgraph/examples/CLIexample.py", line 26 in <module>
```
### Tested environment(s)
* PyQtGraph version: 0.11
* Qt Python binding: PySide2 5.15.0 and PyQt5 5.15.0
* Python version: 3.7.7
* NumPy version: 1.18.4
* Operating system: macOS Catalina
* Installation method: pip
### Additional context
This is not an issue with 0.11.rc0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyqtgraph/graphicsWindows.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 DEPRECATED: The classes below are convenience classes that create a new window
4 containting a single, specific widget. These classes are now unnecessary because
5 it is possible to place any widget into its own window by simply calling its
6 show() method.
7 """
8
9 from .Qt import QtCore, QtGui, mkQApp
10 from .widgets.PlotWidget import *
11 from .imageview import *
12 from .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget
13 from .widgets.GraphicsView import GraphicsView
14
15
16 class GraphicsWindow(GraphicsLayoutWidget):
17 """
18 (deprecated; use :class:`~pyqtgraph.GraphicsLayoutWidget` instead)
19
20 Convenience subclass of :class:`~pyqtgraph.GraphicsLayoutWidget`. This class
21 is intended for use from the interactive python prompt.
22 """
23 def __init__(self, title=None, size=(800,600), **kargs):
24 mkQApp()
25 GraphicsLayoutWidget.__init__(self, **kargs)
26 self.resize(*size)
27 if title is not None:
28 self.setWindowTitle(title)
29 self.show()
30
31
32 class TabWindow(QtGui.QMainWindow):
33 """
34 (deprecated)
35 """
36 def __init__(self, title=None, size=(800,600)):
37 mkQApp()
38 QtGui.QMainWindow.__init__(self)
39 self.resize(*size)
40 self.cw = QtGui.QTabWidget()
41 self.setCentralWidget(self.cw)
42 if title is not None:
43 self.setWindowTitle(title)
44 self.show()
45
46 def __getattr__(self, attr):
47 return getattr(self.cw, attr)
48
49
50 class PlotWindow(PlotWidget):
51 sigClosed = QtCore.Signal(object)
52
53 """
54 (deprecated; use :class:`~pyqtgraph.PlotWidget` instead)
55 """
56 def __init__(self, title=None, **kargs):
57 mkQApp()
58 PlotWidget.__init__(self, **kargs)
59 if title is not None:
60 self.setWindowTitle(title)
61 self.show()
62
63 def closeEvent(self, event):
64 PlotWidget.closeEvent(self, event)
65 self.sigClosed.emit(self)
66
67
68 class ImageWindow(ImageView):
69 sigClosed = QtCore.Signal(object)
70
71 """
72 (deprecated; use :class:`~pyqtgraph.ImageView` instead)
73 """
74 def __init__(self, *args, **kargs):
75 mkQApp()
76 ImageView.__init__(self)
77 if 'title' in kargs:
78 self.setWindowTitle(kargs['title'])
79 del kargs['title']
80 if len(args) > 0 or len(kargs) > 0:
81 self.setImage(*args, **kargs)
82 self.show()
83
84 def closeEvent(self, event):
85 ImageView.closeEvent(self, event)
86 self.sigClosed.emit(self)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyqtgraph/graphicsWindows.py b/pyqtgraph/graphicsWindows.py
--- a/pyqtgraph/graphicsWindows.py
+++ b/pyqtgraph/graphicsWindows.py
@@ -55,10 +55,14 @@
"""
def __init__(self, title=None, **kargs):
mkQApp()
+ self.win = QtGui.QMainWindow()
PlotWidget.__init__(self, **kargs)
+ self.win.setCentralWidget(self)
+ for m in ['resize']:
+ setattr(self, m, getattr(self.win, m))
if title is not None:
- self.setWindowTitle(title)
- self.show()
+ self.win.setWindowTitle(title)
+ self.win.show()
def closeEvent(self, event):
PlotWidget.closeEvent(self, event)
@@ -73,14 +77,20 @@
"""
def __init__(self, *args, **kargs):
mkQApp()
- ImageView.__init__(self)
+ self.win = QtGui.QMainWindow()
+ self.win.resize(800,600)
if 'title' in kargs:
- self.setWindowTitle(kargs['title'])
+ self.win.setWindowTitle(kargs['title'])
del kargs['title']
+ ImageView.__init__(self, self.win)
if len(args) > 0 or len(kargs) > 0:
self.setImage(*args, **kargs)
- self.show()
-
+
+ self.win.setCentralWidget(self)
+ for m in ['resize']:
+ setattr(self, m, getattr(self.win, m))
+ self.win.show()
+
def closeEvent(self, event):
ImageView.closeEvent(self, event)
self.sigClosed.emit(self)
| {"golden_diff": "diff --git a/pyqtgraph/graphicsWindows.py b/pyqtgraph/graphicsWindows.py\n--- a/pyqtgraph/graphicsWindows.py\n+++ b/pyqtgraph/graphicsWindows.py\n@@ -55,10 +55,14 @@\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n+ self.win = QtGui.QMainWindow()\n PlotWidget.__init__(self, **kargs)\n+ self.win.setCentralWidget(self)\n+ for m in ['resize']:\n+ setattr(self, m, getattr(self.win, m))\n if title is not None:\n- self.setWindowTitle(title)\n- self.show()\n+ self.win.setWindowTitle(title)\n+ self.win.show()\n \n def closeEvent(self, event):\n PlotWidget.closeEvent(self, event)\n@@ -73,14 +77,20 @@\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n- ImageView.__init__(self)\n+ self.win = QtGui.QMainWindow()\n+ self.win.resize(800,600)\n if 'title' in kargs:\n- self.setWindowTitle(kargs['title'])\n+ self.win.setWindowTitle(kargs['title'])\n del kargs['title']\n+ ImageView.__init__(self, self.win)\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n- self.show()\n-\n+ \n+ self.win.setCentralWidget(self)\n+ for m in ['resize']:\n+ setattr(self, m, getattr(self.win, m))\n+ self.win.show()\n+ \n def closeEvent(self, event):\n ImageView.closeEvent(self, event)\n self.sigClosed.emit(self)\n", "issue": "Segfault when exiting Command Line Usage Example\n<!-- In the following, please describe your issue in detail! -->\r\n<!-- If some of the sections do not apply, just remove them. -->\r\n\r\n### Short description\r\nWhen Running the command line example, and closing the window, I get a segmentation fault\r\n\r\n### Code to reproduce\r\n<!-- Please provide a minimal working example that reproduces the issue in the code block below.\r\n Ideally, this should be a full example someone else could run without additional setup. -->\r\n```bash\r\npython -m pyqtgraph.examples\r\n```\r\n\r\nRun the command line usage example, and close out the window.\r\n\r\n\r\n### Expected behavior\r\nWindow closes\r\n\r\n### Real behavior\r\nWindow closes, generates a segfault\r\n\r\n```\r\n/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/runpy.py:125: RuntimeWarning: 'pyqtgraph.examples.__main__' found in sys.modules after import of package 'pyqtgraph.examples', but prior to execution of 'pyqtgraph.examples.__main__'; this may result in unpredictable behaviour\r\n warn(RuntimeWarning(msg))\r\nqt.qpa.fonts: Populating font family aliases took 246 ms. Replace uses of missing font family \"FreeMono\" with one that exists to avoid this cost.\r\nUsing PyQt5 (default graphics system)\r\nFatal Python error: Segmentation fault\r\n\r\nCurrent thread 0x000000010840bdc0 (most recent call first):\r\n File \"/Users/ognyan/Developer/pyqtgraph/.venv/lib/python3.7/site-packages/pyqtgraph/examples/CLIexample.py\", line 26 in <module>\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.11\r\n * Qt Python binding: PySide2 5.15.0 and PyQt5 5.15.0\r\n * Python version: 3.7.7\r\n * NumPy version: 1.18.4\r\n * Operating system: macOS Catalina\r\n * Installation method: pip\r\n\r\n### Additional context\r\n\r\nThis is not an issue with 0.11.rc0\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDEPRECATED: The classes below are convenience classes that create a new window\ncontainting a single, specific widget. 
These classes are now unnecessary because\nit is possible to place any widget into its own window by simply calling its\nshow() method.\n\"\"\"\n\nfrom .Qt import QtCore, QtGui, mkQApp\nfrom .widgets.PlotWidget import *\nfrom .imageview import *\nfrom .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget\nfrom .widgets.GraphicsView import GraphicsView\n\n\nclass GraphicsWindow(GraphicsLayoutWidget):\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.GraphicsLayoutWidget` instead)\n \n Convenience subclass of :class:`~pyqtgraph.GraphicsLayoutWidget`. This class\n is intended for use from the interactive python prompt.\n \"\"\"\n def __init__(self, title=None, size=(800,600), **kargs):\n mkQApp()\n GraphicsLayoutWidget.__init__(self, **kargs)\n self.resize(*size)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n\nclass TabWindow(QtGui.QMainWindow):\n \"\"\"\n (deprecated)\n \"\"\"\n def __init__(self, title=None, size=(800,600)):\n mkQApp()\n QtGui.QMainWindow.__init__(self)\n self.resize(*size)\n self.cw = QtGui.QTabWidget()\n self.setCentralWidget(self.cw)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n def __getattr__(self, attr):\n return getattr(self.cw, attr)\n \n\nclass PlotWindow(PlotWidget):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.PlotWidget` instead)\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n PlotWidget.__init__(self, **kargs)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n\n def closeEvent(self, event):\n PlotWidget.closeEvent(self, event)\n self.sigClosed.emit(self)\n\n\nclass ImageWindow(ImageView):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.ImageView` instead)\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n ImageView.__init__(self)\n if 'title' in kargs:\n self.setWindowTitle(kargs['title'])\n del kargs['title']\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n self.show()\n\n def closeEvent(self, event):\n ImageView.closeEvent(self, event)\n self.sigClosed.emit(self)\n", "path": "pyqtgraph/graphicsWindows.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDEPRECATED: The classes below are convenience classes that create a new window\ncontainting a single, specific widget. These classes are now unnecessary because\nit is possible to place any widget into its own window by simply calling its\nshow() method.\n\"\"\"\n\nfrom .Qt import QtCore, QtGui, mkQApp\nfrom .widgets.PlotWidget import *\nfrom .imageview import *\nfrom .widgets.GraphicsLayoutWidget import GraphicsLayoutWidget\nfrom .widgets.GraphicsView import GraphicsView\n\n\nclass GraphicsWindow(GraphicsLayoutWidget):\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.GraphicsLayoutWidget` instead)\n \n Convenience subclass of :class:`~pyqtgraph.GraphicsLayoutWidget`. 
This class\n is intended for use from the interactive python prompt.\n \"\"\"\n def __init__(self, title=None, size=(800,600), **kargs):\n mkQApp()\n GraphicsLayoutWidget.__init__(self, **kargs)\n self.resize(*size)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n\nclass TabWindow(QtGui.QMainWindow):\n \"\"\"\n (deprecated)\n \"\"\"\n def __init__(self, title=None, size=(800,600)):\n mkQApp()\n QtGui.QMainWindow.__init__(self)\n self.resize(*size)\n self.cw = QtGui.QTabWidget()\n self.setCentralWidget(self.cw)\n if title is not None:\n self.setWindowTitle(title)\n self.show()\n \n def __getattr__(self, attr):\n return getattr(self.cw, attr)\n \n\nclass PlotWindow(PlotWidget):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.PlotWidget` instead)\n \"\"\"\n def __init__(self, title=None, **kargs):\n mkQApp()\n self.win = QtGui.QMainWindow()\n PlotWidget.__init__(self, **kargs)\n self.win.setCentralWidget(self)\n for m in ['resize']:\n setattr(self, m, getattr(self.win, m))\n if title is not None:\n self.win.setWindowTitle(title)\n self.win.show()\n\n def closeEvent(self, event):\n PlotWidget.closeEvent(self, event)\n self.sigClosed.emit(self)\n\n\nclass ImageWindow(ImageView):\n sigClosed = QtCore.Signal(object)\n\n \"\"\"\n (deprecated; use :class:`~pyqtgraph.ImageView` instead)\n \"\"\"\n def __init__(self, *args, **kargs):\n mkQApp()\n self.win = QtGui.QMainWindow()\n self.win.resize(800,600)\n if 'title' in kargs:\n self.win.setWindowTitle(kargs['title'])\n del kargs['title']\n ImageView.__init__(self, self.win)\n if len(args) > 0 or len(kargs) > 0:\n self.setImage(*args, **kargs)\n \n self.win.setCentralWidget(self)\n for m in ['resize']:\n setattr(self, m, getattr(self.win, m))\n self.win.show()\n \n def closeEvent(self, event):\n ImageView.closeEvent(self, event)\n self.sigClosed.emit(self)\n", "path": "pyqtgraph/graphicsWindows.py"}]} | 1,476 | 383 |
gh_patches_debug_8470 | rasdani/github-patches | git_diff | Kinto__kinto-338 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
kinto start fails: no module named functools32
```
kinto start
Starting subprocess with file monitor
2015-11-29 10:12:24,821 INFO [venusian][MainThread] kinto 1.9.0 starting.
Traceback (most recent call last):
File "/var/www/kinto.leplat.re/venv/bin/kinto", line 9, in <module>
load_entry_point('kinto==1.9.0', 'console_scripts', 'kinto')()
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__main__.py", line 55, in main
pserve.main(pserve_argv)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py", line 60, in main
return command.run()
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py", line 366, in run
global_conf=vars)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py", line 401, in loadapp
return loadapp(app_spec, name=name, relative_to=relative_to, **kw)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 247, in loadapp
return loadobj(APP, uri, name=name, **kw)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 272, in loadobj
return context.create()
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 710, in create
return self.object_type.invoke(self)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py", line 146, in invoke
return fix_call(context.object, context.global_conf, **context.local_conf)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/util.py", line 55, in fix_call
val = callable(*args, **kw)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__init__.py", line 77, in main
config.scan("kinto.views", **kwargs)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/config/__init__.py", line 974, in scan
ignore=ignore)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/venusian/__init__.py", line 205, in scan
__import__(modname)
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/buckets.py", line 12, in <module>
from kinto.views.collections import Collection
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/collections.py", line 2, in <module>
import jsonschema
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/__init__.py", line 12, in <module>
from jsonschema.exceptions import (
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/exceptions.py", line 6, in <module>
from jsonschema import _utils
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/_utils.py", line 6, in <module>
from jsonschema.compat import str_types, MutableMapping, urlsplit
File "/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/compat.py", line 39, in <module>
from functools32 import lru_cache
ImportError: No module named functools32
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import codecs
3 from setuptools import setup, find_packages
4
5 here = os.path.abspath(os.path.dirname(__file__))
6
7
8 def read_file(filename):
9 """Open a related file and return its content."""
10 with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:
11 content = f.read()
12 return content
13
14 README = read_file('README.rst')
15 CHANGELOG = read_file('CHANGELOG.rst')
16 CONTRIBUTORS = read_file('CONTRIBUTORS.rst')
17
18 REQUIREMENTS = [
19 'waitress',
20 'cliquet>=2.13,<3',
21 'jsonschema',
22 ]
23
24 POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [
25 'cliquet[postgresql]>=2.13,<3'
26 ]
27
28 MONITORING_REQUIREMENTS = REQUIREMENTS + [
29 'cliquet[monitoring]>=2.13,<3'
30 ]
31
32 FXA_REQUIREMENTS = REQUIREMENTS + [
33 'cliquet-fxa'
34 ]
35
36 ENTRY_POINTS = {
37 'paste.app_factory': [
38 'main = kinto:main',
39 ],
40 'console_scripts': [
41 'kinto = kinto.__main__:main'
42 ],
43 }
44
45 DEPENDENCY_LINKS = [
46 ]
47
48 setup(name='kinto',
49 version='1.11.0.dev0',
50 description='Kinto Web Service - Store, Sync, Share, and Self-Host.',
51 long_description=README + "\n\n" + CHANGELOG + "\n\n" + CONTRIBUTORS,
52 license='Apache License (2.0)',
53 classifiers=[
54 "Programming Language :: Python",
55 "Programming Language :: Python :: 2",
56 "Programming Language :: Python :: 2.7",
57 "Programming Language :: Python :: 3",
58 "Programming Language :: Python :: 3.4",
59 "Programming Language :: Python :: 3.5",
60 "Programming Language :: Python :: Implementation :: CPython",
61 "Programming Language :: Python :: Implementation :: PyPy",
62 "Topic :: Internet :: WWW/HTTP",
63 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
64 "License :: OSI Approved :: Apache Software License"
65 ],
66 keywords="web services",
67 author='Mozilla Services',
68 author_email='[email protected]',
69 url='https://github.com/Kinto/kinto',
70 packages=find_packages(),
71 include_package_data=True,
72 zip_safe=False,
73 install_requires=REQUIREMENTS,
74 extras_require={
75 'postgresql': POSTGRESQL_REQUIREMENTS,
76 'monitoring': MONITORING_REQUIREMENTS,
77 'fxa': FXA_REQUIREMENTS,
78 },
79 entry_points=ENTRY_POINTS,
80 dependency_links=DEPENDENCY_LINKS)
81
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
-import os
import codecs
+import os
+import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
@@ -21,6 +22,11 @@
'jsonschema',
]
+if sys.version_info < (3,):
+ REQUIREMENTS.extend([
+ 'functools32', # not installed by jsonschema with old pip versions.
+ ])
+
POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [
'cliquet[postgresql]>=2.13,<3'
]
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,5 +1,6 @@\n-import os\n import codecs\n+import os\n+import sys\n from setuptools import setup, find_packages\n \n here = os.path.abspath(os.path.dirname(__file__))\n@@ -21,6 +22,11 @@\n 'jsonschema',\n ]\n \n+if sys.version_info < (3,):\n+ REQUIREMENTS.extend([\n+ 'functools32', # not installed by jsonschema with old pip versions.\n+ ])\n+\n POSTGRESQL_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[postgresql]>=2.13,<3'\n ]\n", "issue": "kinto start fails: no module named functools32\n```\nkinto start\nStarting subprocess with file monitor\n2015-11-29 10:12:24,821 INFO [venusian][MainThread] kinto 1.9.0 starting. \nTraceback (most recent call last):\n File \"/var/www/kinto.leplat.re/venv/bin/kinto\", line 9, in <module>\n load_entry_point('kinto==1.9.0', 'console_scripts', 'kinto')()\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__main__.py\", line 55, in main\n pserve.main(pserve_argv)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py\", line 60, in main\n return command.run()\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py\", line 366, in run\n global_conf=vars)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/scripts/pserve.py\", line 401, in loadapp\n return loadapp(app_spec, name=name, relative_to=relative_to, **kw)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 247, in loadapp\n return loadobj(APP, uri, name=name, **kw)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 272, in loadobj\n return context.create()\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 710, in create\n return self.object_type.invoke(self)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/loadwsgi.py\", line 146, in invoke\n return fix_call(context.object, context.global_conf, **context.local_conf)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/paste/deploy/util.py\", line 55, in fix_call\n val = callable(*args, **kw)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/__init__.py\", line 77, in main\n config.scan(\"kinto.views\", **kwargs)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/pyramid/config/__init__.py\", line 974, in scan\n ignore=ignore)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/venusian/__init__.py\", line 205, in scan\n __import__(modname)\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/buckets.py\", line 12, in <module>\n from kinto.views.collections import Collection\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/kinto/views/collections.py\", line 2, in <module>\n import jsonschema\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/__init__.py\", line 12, in <module>\n from jsonschema.exceptions import (\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/exceptions.py\", line 6, in <module>\n from jsonschema import _utils\n File \"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/_utils.py\", line 6, in <module>\n from jsonschema.compat import str_types, MutableMapping, urlsplit\n File 
\"/var/www/kinto.leplat.re/venv/local/lib/python2.7/site-packages/jsonschema/compat.py\", line 39, in <module>\n from functools32 import lru_cache\nImportError: No module named functools32\n```\n\n", "before_files": [{"content": "import os\nimport codecs\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'waitress',\n 'cliquet>=2.13,<3',\n 'jsonschema',\n]\n\nPOSTGRESQL_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[postgresql]>=2.13,<3'\n]\n\nMONITORING_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[monitoring]>=2.13,<3'\n]\n\nFXA_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet-fxa'\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\nDEPENDENCY_LINKS = [\n]\n\nsetup(name='kinto',\n version='1.11.0.dev0',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={\n 'postgresql': POSTGRESQL_REQUIREMENTS,\n 'monitoring': MONITORING_REQUIREMENTS,\n 'fxa': FXA_REQUIREMENTS,\n },\n entry_points=ENTRY_POINTS,\n dependency_links=DEPENDENCY_LINKS)\n", "path": "setup.py"}], "after_files": [{"content": "import codecs\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'waitress',\n 'cliquet>=2.13,<3',\n 'jsonschema',\n]\n\nif sys.version_info < (3,):\n REQUIREMENTS.extend([\n 'functools32', # not installed by jsonschema with old pip versions.\n ])\n\nPOSTGRESQL_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[postgresql]>=2.13,<3'\n]\n\nMONITORING_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet[monitoring]>=2.13,<3'\n]\n\nFXA_REQUIREMENTS = REQUIREMENTS + [\n 'cliquet-fxa'\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\nDEPENDENCY_LINKS = [\n]\n\nsetup(name='kinto',\n version='1.11.0.dev0',\n description='Kinto Web 
Service - Store, Sync, Share, and Self-Host.',\n long_description=README + \"\\n\\n\" + CHANGELOG + \"\\n\\n\" + CONTRIBUTORS,\n license='Apache License (2.0)',\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n \"License :: OSI Approved :: Apache Software License\"\n ],\n keywords=\"web services\",\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIREMENTS,\n extras_require={\n 'postgresql': POSTGRESQL_REQUIREMENTS,\n 'monitoring': MONITORING_REQUIREMENTS,\n 'fxa': FXA_REQUIREMENTS,\n },\n entry_points=ENTRY_POINTS,\n dependency_links=DEPENDENCY_LINKS)\n", "path": "setup.py"}]} | 1,956 | 150 |
gh_patches_debug_3910 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-4935 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Instrumenting ambiguity: Integrate with SQLAlchemy or not when underlying psycopg2 used
### Summary of problem
It's not immediately clear from the documentation whether going through the trouble of patching and instrumenting SQLAlchemy provides any additional detail over the auto-instrumented psycopg2 via `ddtrace-run`.
I currently have a Flask app using SQLAlchemy by hand (not Flask-SQLAlchemy) which connects to a Postgres database via psycopg2. Running my app with `ddtrace-run` in gunicorn successfully reports the Flask and Postgres services to APM. However, upon seeing that ddtrace additionally has an [integration with SQLAlchemy](https://ddtrace.readthedocs.io/en/v1.7.1/integrations.html#sqlalchemy), I'm curious whether it provides any added benefit on top of the auto-instrumentation. Is it meant to be complementary? Or is it meant to be an alternative/replacement mechanism for instrumenting the database, i.e. what the psycopg2 integration would report?
I ended up debugging into `ddtrace.contrib.sqlalchemy.engine.EngineTracer` in my application to see what `Pin.get_from(self.engine)` would report without any overrides. It gave me:
```
Pin(service=postgres, tags=None, tracer=<ddtrace.tracer.Tracer object at 0xffff8bf3e580>)
```
which leads me to believe manual patching and integrating with SQLAlchemy is redundant if I'm using psycopg2 and `ddtrace-run`. Eyeballing the `EngineTracer` source code, it _appears_ to add no additional benefit over psycopg2's auto-instrumentation.
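To make the comparison concrete, here is a minimal sketch of the two setups being compared (the connection string and app wiring are assumptions, not taken from the actual app):
```
# Path A - what `ddtrace-run` already provides: psycopg2 is patched automatically,
# so queries issued through SQLAlchemy are traced at the DBAPI level.
from sqlalchemy import create_engine

engine = create_engine("postgresql+psycopg2://user:pass@localhost/appdb")  # assumed DSN
engine.connect().execute("SELECT 1")  # emits a `postgres` span via the psycopg2 integration

# Path B - the documented SQLAlchemy patching, which must happen before the import above:
#   from ddtrace import patch
#   patch(sqlalchemy=True)
# As noted above, this appears to add nothing beyond what Path A already reports.
```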
The documentation should be updated (hell, I'd love a confirmatory answer here 😃) on whether or not one should even bother manually patching SQLAlchemy in this context if they already have/use/configure SQLAlchemy's engine to connect to a Postgres DB provided by psycopg2. Something to this effect, in SQLAlchemy's section:
> # SQLAlchemy
>
> Integrating with SQLAlchemy is not necessary if you're integrating psycopg and SQLAlchemy connects to a Postgres database.
>
> To trace sqlalchemy queries, add instrumentation to the engine class using the patch method that must be called before importing sqlalchemy:
> ...
### Which version of dd-trace-py are you using?
1.7.1
### Which version of pip are you using?
22.3.1
### Which libraries and their versions are you using?
```
Flask==1.1.1
SQLAlchemy==1.3.11
ddtrace==1.7.1
psycopg2-binary==2.9.1
```
### How can we reproduce your problem?
N/A
### What is the result that you get?
N/A
### What is the result that you expected?
N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/sqlalchemy/__init__.py`
Content:
```
1 """
2 To trace sqlalchemy queries, add instrumentation to the engine class
3 using the patch method that **must be called before** importing sqlalchemy::
4
5 # patch before importing `create_engine`
6 from ddtrace import Pin, patch
7 patch(sqlalchemy=True)
8
9 # use SQLAlchemy as usual
10 from sqlalchemy import create_engine
11
12 engine = create_engine('sqlite:///:memory:')
13 engine.connect().execute("SELECT COUNT(*) FROM users")
14
15 # Use a PIN to specify metadata related to this engine
16 Pin.override(engine, service='replica-db')
17 """
18 from ...internal.utils.importlib import require_modules
19
20
21 required_modules = ["sqlalchemy", "sqlalchemy.event"]
22
23 with require_modules(required_modules) as missing_modules:
24 if not missing_modules:
25 from .engine import trace_engine
26 from .patch import patch
27 from .patch import unpatch
28
29 __all__ = ["trace_engine", "patch", "unpatch"]
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/sqlalchemy/__init__.py b/ddtrace/contrib/sqlalchemy/__init__.py
--- a/ddtrace/contrib/sqlalchemy/__init__.py
+++ b/ddtrace/contrib/sqlalchemy/__init__.py
@@ -1,4 +1,8 @@
"""
+Enabling the SQLAlchemy integration is only necessary if there is no
+instrumentation available or enabled for the underlying database engine (e.g.
+pymysql, psycopg, mysql-connector, etc.).
+
To trace sqlalchemy queries, add instrumentation to the engine class
using the patch method that **must be called before** importing sqlalchemy::
| {"golden_diff": "diff --git a/ddtrace/contrib/sqlalchemy/__init__.py b/ddtrace/contrib/sqlalchemy/__init__.py\n--- a/ddtrace/contrib/sqlalchemy/__init__.py\n+++ b/ddtrace/contrib/sqlalchemy/__init__.py\n@@ -1,4 +1,8 @@\n \"\"\"\n+Enabling the SQLAlchemy integration is only necessary if there is no\n+instrumentation available or enabled for the underlying database engine (e.g.\n+pymysql, psycopg, mysql-connector, etc.).\n+\n To trace sqlalchemy queries, add instrumentation to the engine class\n using the patch method that **must be called before** importing sqlalchemy::\n", "issue": "Instrumenting ambiguity: Integrate with SQLAlchemy or not when underlying psycopg2 used\n<!--\r\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n-->\r\n\r\n### Summary of problem\r\n\r\nIt's not immediately clear from documentation if going through the trouble of patching and instrumenting SQLAlchemy provides any additional details over the auto-instrumented psycopg2 via `ddtrace-run`.\r\n\r\nI currently have a Flask app using SQLAlchemy by hand (not Flask-SQLAlchemy) which connects to a Postgres database via psycopg2. Running my app with `ddtrace-run` in gunicorn successfully reports the Flask and Postgres services to APM. However upon seeing that ddtrace additionally has an [integration to SQLAlchemy](https://ddtrace.readthedocs.io/en/v1.7.1/integrations.html#sqlalchemy), I'm curious if it provides any added benefit on top of the auto instrumentation. Is it meant to be complimentary? Or is it meant to be an alternative/replacement mechanism for instrumenting the database/what psycopg2 integration would report.\r\n\r\nI ending up debugging into `ddtrace.contrib.sqlalchemy.engine.EngineTracer` in my application to see what `Pin.get_from(self.engine)` would report without any overrides. It gave me:\r\n\r\n```\r\nPin(service=postgres, tags=None, tracer=<ddtrace.tracer.Tracer object at 0xffff8bf3e580>)\r\n```\r\n\r\nwhich leads me to believe manual patching and integrating with SQLAlchemy is redundant if I'm using pscopg2 and `ddtrace-run`. Eyeballing the `EngineTracer` source code _appears_ to indicate it adds no additional benefit over psychopg2's auto-instrumentation.\r\n\r\nDocumentation should be updated (hell, I'd love a confirmatory answer here \ud83d\ude03 ) on whether or not one should even bother going to the trouble of manually patching SQLAlchemy in this context if they already have/use/configure SQLAlchemy's engine to connect to a postgres DB provided by psycopg2. 
Something to the effect, in SQLAlchemy's section:\r\n\r\n> # SQLAlchemy\r\n> \r\n> Integrating with SQLAlchemy is not necessary if you're integrating psycopg and SQLAlchemy connects to a Postgres database.\r\n>\r\n> To trace sqlalchemy queries, add instrumentation to the engine class using the patch method that must be called before importing sqlalchemy:\r\n> ...\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\n1.7.1\r\n\r\n### Which version of pip are you using?\r\n\r\n22.3.1\r\n\r\n### Which libraries and their versions are you using?\r\n\r\n```\r\nFlask==1.1.1\r\nSQLAlchemy==1.3.11\r\nddtrace==1.7.1\r\npsycopg2-binary==2.9.1\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\nN/A\r\n\r\n### What is the result that you get?\r\n\r\nN/A\r\n\r\n### What is the result that you expected?\r\n\r\nN/A\n", "before_files": [{"content": "\"\"\"\nTo trace sqlalchemy queries, add instrumentation to the engine class\nusing the patch method that **must be called before** importing sqlalchemy::\n\n # patch before importing `create_engine`\n from ddtrace import Pin, patch\n patch(sqlalchemy=True)\n\n # use SQLAlchemy as usual\n from sqlalchemy import create_engine\n\n engine = create_engine('sqlite:///:memory:')\n engine.connect().execute(\"SELECT COUNT(*) FROM users\")\n\n # Use a PIN to specify metadata related to this engine\n Pin.override(engine, service='replica-db')\n\"\"\"\nfrom ...internal.utils.importlib import require_modules\n\n\nrequired_modules = [\"sqlalchemy\", \"sqlalchemy.event\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .engine import trace_engine\n from .patch import patch\n from .patch import unpatch\n\n __all__ = [\"trace_engine\", \"patch\", \"unpatch\"]\n", "path": "ddtrace/contrib/sqlalchemy/__init__.py"}], "after_files": [{"content": "\"\"\"\nEnabling the SQLAlchemy integration is only necessary if there is no\ninstrumentation available or enabled for the underlying database engine (e.g.\npymysql, psycopg, mysql-connector, etc.).\n\nTo trace sqlalchemy queries, add instrumentation to the engine class\nusing the patch method that **must be called before** importing sqlalchemy::\n\n # patch before importing `create_engine`\n from ddtrace import Pin, patch\n patch(sqlalchemy=True)\n\n # use SQLAlchemy as usual\n from sqlalchemy import create_engine\n\n engine = create_engine('sqlite:///:memory:')\n engine.connect().execute(\"SELECT COUNT(*) FROM users\")\n\n # Use a PIN to specify metadata related to this engine\n Pin.override(engine, service='replica-db')\n\"\"\"\nfrom ...internal.utils.importlib import require_modules\n\n\nrequired_modules = [\"sqlalchemy\", \"sqlalchemy.event\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .engine import trace_engine\n from .patch import patch\n from .patch import unpatch\n\n __all__ = [\"trace_engine\", \"patch\", \"unpatch\"]\n", "path": "ddtrace/contrib/sqlalchemy/__init__.py"}]} | 1,154 | 132 |
gh_patches_debug_5368 | rasdani/github-patches | git_diff | mindsdb__mindsdb-337 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add consistency check for model data object in the CI tests
We should take a snapshot of a "correct" model data object (as returned by `get_model_data()`) for the CI testing dataset and always compare the json-parsed outputs of `get_model_data` with it.
It might be a bit tedious to alter every time `get_model_data` changes, but otherwise there are certain situations where we can get silent failures, since certain parts of the Model Analyzer have more general exception catching built in (certain data simply can't be produced in various situations) and `get_model_data` itself is made to work with incomplete data in order to return this data during training.
If we want to be really scrupulous we should check the model data object at each phase of training, but I think that'd be overdoing it.
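A minimal sketch of what such a CI check could look like (the snapshot location and the way the predictor handle is obtained are assumptions, not the actual test-suite layout):
```
import json

def test_model_data_matches_snapshot(predictor):
    # Round-trip through JSON so we compare exactly what get_model_data() serialises to.
    actual = json.loads(json.dumps(predictor.get_model_data()))
    with open("tests/ci_snapshots/model_data.json") as f:
        expected = json.load(f)
    # Comparing the top-level keys keeps the snapshot cheap to maintain while still
    # catching sections that silently go missing.
    assert set(actual) == set(expected)
```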
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/__about__.py`
Content:
```
1 __title__ = 'MindsDB'
2 __package_name__ = 'mindsdb'
3 __version__ = '1.7.11'
4 __description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
5 __email__ = "[email protected]"
6 __author__ = 'MindsDB Inc'
7 __github__ = 'https://github.com/mindsdb/mindsdb'
8 __pypi__ = 'https://pypi.org/project/mindsdb'
9 __license__ = 'MIT'
10 __copyright__ = 'Copyright 2018- mindsdb'
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '1.7.11'
+__version__ = '1.7.12'
__description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
| {"golden_diff": "diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py\n--- a/mindsdb/__about__.py\n+++ b/mindsdb/__about__.py\n@@ -1,6 +1,6 @@\n __title__ = 'MindsDB'\n __package_name__ = 'mindsdb'\n-__version__ = '1.7.11'\n+__version__ = '1.7.12'\n __description__ = \"MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n __email__ = \"[email protected]\"\n __author__ = 'MindsDB Inc'\n", "issue": "Add consistency check for model data object in the CI tests\nWe should take a snapshot of a \"correct\" model data object (as returned by `get_model_data()`) for the CI testing dataset and always compare the json-parsed outputs of `get_model_data` with it.\r\n\r\nMight be a bit tedious to alter every time when we run `get_model_data` ,but otherwise there are certain situation where we can get silent failures. Since certain parts of the Model Analyzer have more general exception catching built in (since certain data simply can't be produced in various situations) and `get_model_data` itself is made to work with incomplete data in order to return this data during training.\r\n\r\nIf we want to be really scrupulous we should check the model data object at each phase of training, but I think that'd be overdoing it.\n", "before_files": [{"content": "__title__ = 'MindsDB'\n__package_name__ = 'mindsdb'\n__version__ = '1.7.11'\n__description__ = \"MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n__email__ = \"[email protected]\"\n__author__ = 'MindsDB Inc'\n__github__ = 'https://github.com/mindsdb/mindsdb'\n__pypi__ = 'https://pypi.org/project/mindsdb'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018- mindsdb'\n", "path": "mindsdb/__about__.py"}], "after_files": [{"content": "__title__ = 'MindsDB'\n__package_name__ = 'mindsdb'\n__version__ = '1.7.12'\n__description__ = \"MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects.\"\n__email__ = \"[email protected]\"\n__author__ = 'MindsDB Inc'\n__github__ = 'https://github.com/mindsdb/mindsdb'\n__pypi__ = 'https://pypi.org/project/mindsdb'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2018- mindsdb'\n", "path": "mindsdb/__about__.py"}]} | 582 | 147 |
gh_patches_debug_20887 | rasdani/github-patches | git_diff | zulip__zulip-19155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make muting work for deactivated users
Even after a user is deactivated, it may be helpful to hide that user's messages, avatar, etc. We should therefore make it possible to mute deactivated users.
This will also fix a bug where if a user tries to mute a deactivated user, the action silently fails.
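A rough sketch of the behaviour being requested, using the helpers visible in the view code below (the deactivation helper, its import location, and the example users are assumptions):
```
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import do_deactivate_user, do_mute_user  # do_deactivate_user location assumed
from zerver.lib.user_mutes import get_mute_object

# hamlet and cordelia are assumed to be existing UserProfile rows
do_deactivate_user(cordelia, acting_user=None)
do_mute_user(hamlet, cordelia, date_muted=timezone_now())  # should succeed instead of silently failing
assert get_mute_object(hamlet, cordelia) is not None
```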
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/views/muting.py`
Content:
```
1 import datetime
2 from typing import Optional
3
4 from django.http import HttpRequest, HttpResponse
5 from django.utils.timezone import now as timezone_now
6 from django.utils.translation import gettext as _
7
8 from zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user
9 from zerver.lib.exceptions import JsonableError
10 from zerver.lib.request import REQ, has_request_variables
11 from zerver.lib.response import json_success
12 from zerver.lib.streams import (
13 access_stream_by_id,
14 access_stream_by_name,
15 access_stream_for_unmute_topic_by_id,
16 access_stream_for_unmute_topic_by_name,
17 check_for_exactly_one_stream_arg,
18 )
19 from zerver.lib.topic_mutes import topic_is_muted
20 from zerver.lib.user_mutes import get_mute_object
21 from zerver.lib.users import access_user_by_id
22 from zerver.lib.validator import check_int
23 from zerver.models import UserProfile
24
25
26 def mute_topic(
27 user_profile: UserProfile,
28 stream_id: Optional[int],
29 stream_name: Optional[str],
30 topic_name: str,
31 date_muted: datetime.datetime,
32 ) -> HttpResponse:
33 if stream_name is not None:
34 (stream, sub) = access_stream_by_name(user_profile, stream_name)
35 else:
36 assert stream_id is not None
37 (stream, sub) = access_stream_by_id(user_profile, stream_id)
38
39 if topic_is_muted(user_profile, stream.id, topic_name):
40 raise JsonableError(_("Topic already muted"))
41
42 do_mute_topic(user_profile, stream, topic_name, date_muted)
43 return json_success()
44
45
46 def unmute_topic(
47 user_profile: UserProfile, stream_id: Optional[int], stream_name: Optional[str], topic_name: str
48 ) -> HttpResponse:
49 error = _("Topic is not muted")
50
51 if stream_name is not None:
52 stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)
53 else:
54 assert stream_id is not None
55 stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)
56
57 if not topic_is_muted(user_profile, stream.id, topic_name):
58 raise JsonableError(error)
59
60 do_unmute_topic(user_profile, stream, topic_name)
61 return json_success()
62
63
64 @has_request_variables
65 def update_muted_topic(
66 request: HttpRequest,
67 user_profile: UserProfile,
68 stream_id: Optional[int] = REQ(json_validator=check_int, default=None),
69 stream: Optional[str] = REQ(default=None),
70 topic: str = REQ(),
71 op: str = REQ(),
72 ) -> HttpResponse:
73
74 check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)
75
76 if op == "add":
77 return mute_topic(
78 user_profile=user_profile,
79 stream_id=stream_id,
80 stream_name=stream,
81 topic_name=topic,
82 date_muted=timezone_now(),
83 )
84 elif op == "remove":
85 return unmute_topic(
86 user_profile=user_profile,
87 stream_id=stream_id,
88 stream_name=stream,
89 topic_name=topic,
90 )
91
92
93 def mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:
94 if user_profile.id == muted_user_id:
95 raise JsonableError(_("Cannot mute self"))
96
97 muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)
98 date_muted = timezone_now()
99
100 if get_mute_object(user_profile, muted_user) is not None:
101 raise JsonableError(_("User already muted"))
102
103 do_mute_user(user_profile, muted_user, date_muted)
104 return json_success()
105
106
107 def unmute_user(
108 request: HttpRequest, user_profile: UserProfile, muted_user_id: int
109 ) -> HttpResponse:
110 muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)
111 mute_object = get_mute_object(user_profile, muted_user)
112
113 if mute_object is None:
114 raise JsonableError(_("User is not muted"))
115
116 do_unmute_user(mute_object)
117 return json_success()
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zerver/views/muting.py b/zerver/views/muting.py
--- a/zerver/views/muting.py
+++ b/zerver/views/muting.py
@@ -94,7 +94,9 @@
if user_profile.id == muted_user_id:
raise JsonableError(_("Cannot mute self"))
- muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)
+ muted_user = access_user_by_id(
+ user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False
+ )
date_muted = timezone_now()
if get_mute_object(user_profile, muted_user) is not None:
@@ -107,7 +109,9 @@
def unmute_user(
request: HttpRequest, user_profile: UserProfile, muted_user_id: int
) -> HttpResponse:
- muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)
+ muted_user = access_user_by_id(
+ user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False
+ )
mute_object = get_mute_object(user_profile, muted_user)
if mute_object is None:
| {"golden_diff": "diff --git a/zerver/views/muting.py b/zerver/views/muting.py\n--- a/zerver/views/muting.py\n+++ b/zerver/views/muting.py\n@@ -94,7 +94,9 @@\n if user_profile.id == muted_user_id:\n raise JsonableError(_(\"Cannot mute self\"))\n \n- muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)\n+ muted_user = access_user_by_id(\n+ user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n+ )\n date_muted = timezone_now()\n \n if get_mute_object(user_profile, muted_user) is not None:\n@@ -107,7 +109,9 @@\n def unmute_user(\n request: HttpRequest, user_profile: UserProfile, muted_user_id: int\n ) -> HttpResponse:\n- muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)\n+ muted_user = access_user_by_id(\n+ user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n+ )\n mute_object = get_mute_object(user_profile, muted_user)\n \n if mute_object is None:\n", "issue": "Make muting work for deactivated users\nEven after a user deactivated, it may be helpful to hide that user's messages, avatar, etc. We should therefore make it possible to mute deactivated users.\r\n\r\nThis will also fix a bug where if a user tries to mute a deactivated user, the action silently fails.\n", "before_files": [{"content": "import datetime\nfrom typing import Optional\n\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.timezone import now as timezone_now\nfrom django.utils.translation import gettext as _\n\nfrom zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user\nfrom zerver.lib.exceptions import JsonableError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.streams import (\n access_stream_by_id,\n access_stream_by_name,\n access_stream_for_unmute_topic_by_id,\n access_stream_for_unmute_topic_by_name,\n check_for_exactly_one_stream_arg,\n)\nfrom zerver.lib.topic_mutes import topic_is_muted\nfrom zerver.lib.user_mutes import get_mute_object\nfrom zerver.lib.users import access_user_by_id\nfrom zerver.lib.validator import check_int\nfrom zerver.models import UserProfile\n\n\ndef mute_topic(\n user_profile: UserProfile,\n stream_id: Optional[int],\n stream_name: Optional[str],\n topic_name: str,\n date_muted: datetime.datetime,\n) -> HttpResponse:\n if stream_name is not None:\n (stream, sub) = access_stream_by_name(user_profile, stream_name)\n else:\n assert stream_id is not None\n (stream, sub) = access_stream_by_id(user_profile, stream_id)\n\n if topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(_(\"Topic already muted\"))\n\n do_mute_topic(user_profile, stream, topic_name, date_muted)\n return json_success()\n\n\ndef unmute_topic(\n user_profile: UserProfile, stream_id: Optional[int], stream_name: Optional[str], topic_name: str\n) -> HttpResponse:\n error = _(\"Topic is not muted\")\n\n if stream_name is not None:\n stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)\n else:\n assert stream_id is not None\n stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)\n\n if not topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(error)\n\n do_unmute_topic(user_profile, stream, topic_name)\n return json_success()\n\n\n@has_request_variables\ndef update_muted_topic(\n request: HttpRequest,\n user_profile: UserProfile,\n stream_id: Optional[int] = 
REQ(json_validator=check_int, default=None),\n stream: Optional[str] = REQ(default=None),\n topic: str = REQ(),\n op: str = REQ(),\n) -> HttpResponse:\n\n check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)\n\n if op == \"add\":\n return mute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n date_muted=timezone_now(),\n )\n elif op == \"remove\":\n return unmute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n )\n\n\ndef mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:\n if user_profile.id == muted_user_id:\n raise JsonableError(_(\"Cannot mute self\"))\n\n muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)\n date_muted = timezone_now()\n\n if get_mute_object(user_profile, muted_user) is not None:\n raise JsonableError(_(\"User already muted\"))\n\n do_mute_user(user_profile, muted_user, date_muted)\n return json_success()\n\n\ndef unmute_user(\n request: HttpRequest, user_profile: UserProfile, muted_user_id: int\n) -> HttpResponse:\n muted_user = access_user_by_id(user_profile, muted_user_id, allow_bots=False, for_admin=False)\n mute_object = get_mute_object(user_profile, muted_user)\n\n if mute_object is None:\n raise JsonableError(_(\"User is not muted\"))\n\n do_unmute_user(mute_object)\n return json_success()\n", "path": "zerver/views/muting.py"}], "after_files": [{"content": "import datetime\nfrom typing import Optional\n\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.timezone import now as timezone_now\nfrom django.utils.translation import gettext as _\n\nfrom zerver.lib.actions import do_mute_topic, do_mute_user, do_unmute_topic, do_unmute_user\nfrom zerver.lib.exceptions import JsonableError\nfrom zerver.lib.request import REQ, has_request_variables\nfrom zerver.lib.response import json_success\nfrom zerver.lib.streams import (\n access_stream_by_id,\n access_stream_by_name,\n access_stream_for_unmute_topic_by_id,\n access_stream_for_unmute_topic_by_name,\n check_for_exactly_one_stream_arg,\n)\nfrom zerver.lib.topic_mutes import topic_is_muted\nfrom zerver.lib.user_mutes import get_mute_object\nfrom zerver.lib.users import access_user_by_id\nfrom zerver.lib.validator import check_int\nfrom zerver.models import UserProfile\n\n\ndef mute_topic(\n user_profile: UserProfile,\n stream_id: Optional[int],\n stream_name: Optional[str],\n topic_name: str,\n date_muted: datetime.datetime,\n) -> HttpResponse:\n if stream_name is not None:\n (stream, sub) = access_stream_by_name(user_profile, stream_name)\n else:\n assert stream_id is not None\n (stream, sub) = access_stream_by_id(user_profile, stream_id)\n\n if topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(_(\"Topic already muted\"))\n\n do_mute_topic(user_profile, stream, topic_name, date_muted)\n return json_success()\n\n\ndef unmute_topic(\n user_profile: UserProfile, stream_id: Optional[int], stream_name: Optional[str], topic_name: str\n) -> HttpResponse:\n error = _(\"Topic is not muted\")\n\n if stream_name is not None:\n stream = access_stream_for_unmute_topic_by_name(user_profile, stream_name, error)\n else:\n assert stream_id is not None\n stream = access_stream_for_unmute_topic_by_id(user_profile, stream_id, error)\n\n if not topic_is_muted(user_profile, stream.id, topic_name):\n raise JsonableError(error)\n\n do_unmute_topic(user_profile, stream, topic_name)\n return 
json_success()\n\n\n@has_request_variables\ndef update_muted_topic(\n request: HttpRequest,\n user_profile: UserProfile,\n stream_id: Optional[int] = REQ(json_validator=check_int, default=None),\n stream: Optional[str] = REQ(default=None),\n topic: str = REQ(),\n op: str = REQ(),\n) -> HttpResponse:\n\n check_for_exactly_one_stream_arg(stream_id=stream_id, stream=stream)\n\n if op == \"add\":\n return mute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n date_muted=timezone_now(),\n )\n elif op == \"remove\":\n return unmute_topic(\n user_profile=user_profile,\n stream_id=stream_id,\n stream_name=stream,\n topic_name=topic,\n )\n\n\ndef mute_user(request: HttpRequest, user_profile: UserProfile, muted_user_id: int) -> HttpResponse:\n if user_profile.id == muted_user_id:\n raise JsonableError(_(\"Cannot mute self\"))\n\n muted_user = access_user_by_id(\n user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n )\n date_muted = timezone_now()\n\n if get_mute_object(user_profile, muted_user) is not None:\n raise JsonableError(_(\"User already muted\"))\n\n do_mute_user(user_profile, muted_user, date_muted)\n return json_success()\n\n\ndef unmute_user(\n request: HttpRequest, user_profile: UserProfile, muted_user_id: int\n) -> HttpResponse:\n muted_user = access_user_by_id(\n user_profile, muted_user_id, allow_bots=False, allow_deactivated=True, for_admin=False\n )\n mute_object = get_mute_object(user_profile, muted_user)\n\n if mute_object is None:\n raise JsonableError(_(\"User is not muted\"))\n\n do_unmute_user(mute_object)\n return json_success()\n", "path": "zerver/views/muting.py"}]} | 1,455 | 277 |
gh_patches_debug_15617 | rasdani/github-patches | git_diff | python__peps-2826 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Decide what PEPs fall under `Topic: Packaging` and act accordingly
Right now, as originally discussed in #2096, there seem to be two plausible interpretations of what qualifies for the `Topic: Packaging` label: PEPs that _relate_ to packaging in some direct way, and PEPs that apply to the packaging ecosystem (as opposed to the stdlib) and are thus discussed primarily on packaging community fora and accepted/rejected by the relevant packaging-community standing PEP delegate.
In particular, given that PEP 632 (PEP-632), on deprecating and removing `distutils`, was removed from the packaging topic at the request of @zooba for relating to the core stdlib, this would seem to suggest that PEP 453 (PEP-453) and PEP 477 (PEP-477), on `ensurepip` and its backport to Python 2.7, respectively, should also be removed by the same standard (or, alternatively, PEP 632 restored).
Presuming consensus can be reached prior to its merging, I can implement this on the existing #2690 , given it makes a number of related improvements/refinements to the packaging-tagged PEP headers.
<details>
<summary>Relevant discussion from #2096 (click to expand)</summary>
@pfmoore
> this is a core Python PEP, not a packaging PEP, and like @dstufft noted, I don't think we tend to modify those after approval.
@CAM-Gerlach
> In that case, as originally discussed on https://github.com/python/peps/pull/2656 and done for PEP 632 ([PEP-632](https://peps.python.org/632)), maybe we should drop Topic: Packaging from this PEP as well as the related PEP 477 ([PEP-477](https://peps.python.org/477)) ? I can do it in https://github.com/python/peps/pull/2690 since its closely related to the other changes in that PR (also, your input there on updating PEP 262 ([PEP-262](https://peps.python.org/262))'s status to reflect reality would be helpful).
@pfmoore
> I don't know on that. It's packaging related in the sense that it is about packaging tools, so if people are looking for things that are "about" packaging, it does belong in the Packaging topic. But it's not a "PyPA specification" in the sense defined [here](https://www.pypa.io/en/latest/specifications/), which is why I said that the normal PEP rules about modifications should apply, rather than the PyPA-specific process. Ultimately, I'm not actually sure what the rules are for when a PEP qualifies as being in the Packaging topic...
@CAM-Gerlach
> Yeah, exactly. That basically comes down to whether a Topic is considered more of a "Category" (i.e. the former), or a "Track" (i.e. the latter). I presented both alternatives at the beginning of the discussion, and both were favored at different points in its evolution; the initial consensus in the Discourse thread was something more like a "Track", but on the PR and naming of the "Topic" header that was eventually implemented (that allows multiple topics per PR), it ended up basically the latter, at least nominally.
>
> However, with PEP 632 ([PEP-632](https://peps.python.org/632)) being removed at the behest of @zooba in https://github.com/python/peps/pull/2636#discussion_r898173875 , given these two PEPs basically just concern adding/backporting a utility module from the core standard library rather than PyPA/PyPI standards, it seems to me to not make sense to keep those PEPs but not another very similar one about the deprecation and removal of what was the core packaging module. So, I suggest either removing those two, or adding back PEP 632.
@pradyunsg
> Alternatively, we can defer doing anything here until the authors have a concern with being listed under the packaging index as well or till we see/identify more instances of something like this? In any case, this discussion should move to a separate issue IMO.
@zooba
> I think this is in the same boat as PEP 632, and shouldn't be tagged as a packaging PEP.
>
> As a proposal of the rule I'd use, if we'd let the packaging delegate (i.e. Paul, right now) decide without going to python-dev or the SC, then it's a packaging PEP. Whether something is added to the standard library clearly falls outside of this scope, and so this PEP is standards track and not packaging.
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pep_sphinx_extensions/pep_zero_generator/constants.py`
Content:
```
1 """Holds type and status constants for PEP 0 generation."""
2
3 STATUS_ACCEPTED = "Accepted"
4 STATUS_ACTIVE = "Active"
5 STATUS_DEFERRED = "Deferred"
6 STATUS_DRAFT = "Draft"
7 STATUS_FINAL = "Final"
8 STATUS_PROVISIONAL = "Provisional"
9 STATUS_REJECTED = "Rejected"
10 STATUS_SUPERSEDED = "Superseded"
11 STATUS_WITHDRAWN = "Withdrawn"
12
13 # Valid values for the Status header.
14 STATUS_VALUES = {
15 STATUS_ACCEPTED, STATUS_PROVISIONAL, STATUS_REJECTED, STATUS_WITHDRAWN,
16 STATUS_DEFERRED, STATUS_FINAL, STATUS_ACTIVE, STATUS_DRAFT, STATUS_SUPERSEDED,
17 }
18 # Map of invalid/special statuses to their valid counterparts
19 SPECIAL_STATUSES = {
20 "April Fool!": STATUS_REJECTED, # See PEP 401 :)
21 }
22 # Draft PEPs have no status displayed, Active shares a key with Accepted
23 HIDE_STATUS = {STATUS_DRAFT, STATUS_ACTIVE}
24 # Dead PEP statuses
25 DEAD_STATUSES = {STATUS_REJECTED, STATUS_WITHDRAWN, STATUS_SUPERSEDED}
26
27 TYPE_INFO = "Informational"
28 TYPE_PROCESS = "Process"
29 TYPE_STANDARDS = "Standards Track"
30
31 # Valid values for the Type header.
32 TYPE_VALUES = {TYPE_STANDARDS, TYPE_INFO, TYPE_PROCESS}
33 # Active PEPs can only be for Informational or Process PEPs.
34 ACTIVE_ALLOWED = {TYPE_PROCESS, TYPE_INFO}
35
36 # map of topic -> additional description
37 SUBINDICES_BY_TOPIC = {
38 "packaging": """\
39 The canonical, up-to-date packaging specifications can be found on the
40 `Python Packaging Authority`_ (PyPA) `specifications`_ page.
41 Packaging PEPs follow the `PyPA specification update process`_.
42 They are used to propose major additions or changes to the PyPA specifications.
43
44 .. _Python Packaging Authority: https://www.pypa.io/
45 .. _specifications: https://packaging.python.org/en/latest/specifications/
46 .. _PyPA specification update process: https://www.pypa.io/en/latest/specifications/#specification-update-process
47 """,
48 "release": """\
49 A PEP is written to specify the release cycle for each feature release of Python.
50 See the `developer's guide`_ for more information.
51
52 .. _developer's guide: https://devguide.python.org/devcycle/
53 """,
54 "typing": """\
55 Many recent PEPs propose changes to Python's static type system
56 or otherwise relate to type annotations.
57 They are listed here for reference.
58 """
59 }
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pep_sphinx_extensions/pep_zero_generator/constants.py b/pep_sphinx_extensions/pep_zero_generator/constants.py
--- a/pep_sphinx_extensions/pep_zero_generator/constants.py
+++ b/pep_sphinx_extensions/pep_zero_generator/constants.py
@@ -36,10 +36,10 @@
# map of topic -> additional description
SUBINDICES_BY_TOPIC = {
"packaging": """\
-The canonical, up-to-date packaging specifications can be found on the
-`Python Packaging Authority`_ (PyPA) `specifications`_ page.
Packaging PEPs follow the `PyPA specification update process`_.
They are used to propose major additions or changes to the PyPA specifications.
+The canonical, up-to-date packaging specifications can be found on the
+`Python Packaging Authority`_ (PyPA) `specifications`_ page.
.. _Python Packaging Authority: https://www.pypa.io/
.. _specifications: https://packaging.python.org/en/latest/specifications/
| {"golden_diff": "diff --git a/pep_sphinx_extensions/pep_zero_generator/constants.py b/pep_sphinx_extensions/pep_zero_generator/constants.py\n--- a/pep_sphinx_extensions/pep_zero_generator/constants.py\n+++ b/pep_sphinx_extensions/pep_zero_generator/constants.py\n@@ -36,10 +36,10 @@\n # map of topic -> additional description\n SUBINDICES_BY_TOPIC = {\n \"packaging\": \"\"\"\\\n-The canonical, up-to-date packaging specifications can be found on the\n-`Python Packaging Authority`_ (PyPA) `specifications`_ page.\n Packaging PEPs follow the `PyPA specification update process`_.\n They are used to propose major additions or changes to the PyPA specifications.\n+The canonical, up-to-date packaging specifications can be found on the\n+`Python Packaging Authority`_ (PyPA) `specifications`_ page.\n \n .. _Python Packaging Authority: https://www.pypa.io/\n .. _specifications: https://packaging.python.org/en/latest/specifications/\n", "issue": "Decide what PEPs fall under `Topic: Packaging` and act accordingly\nRight now, as originally discussed in #2096 , there seem to be two plausible interpretation of what qualifies for the `Topic: Packaging` label: PEPs that _relate_ to packaging in some direct way, and PEPs that apply to packaging ecosystem (as opposed to the stdlib), and are thus discussed primarily on packaging community fora and accepted/rejected by the relevant packaging-community standing PEP delegate.\r\n\r\nIn particular, given PEP 632 (PEP-632), on deprecating and removing `disutils`, was removed from packaging at the request of @zooba for relating to the core stdlib, this would seem to suggest that PEP 453 (PEP-453) and PEP 477 (PEP-477), on `ensurepip` and its backport to Python 2.7, respectively, should also be removed by the same standard (or, alternatively, PEP 632 restored).\r\n\r\nPresuming consensus can be reached prior to its merging, I can implement this on the existing #2690 , given it makes a number of related improvements/refinements to the packaging-tagged PEP headers.\r\n\r\n<details>\r\n\r\n<summary>Relevant discussion from #2096 (click to expand)</summary>\r\n\r\n@pfmoore \r\n\r\n> this is a core Python PEP, not a packaging PEP, and like @dstufft noted, I don't think we tend to modify those after approval.\r\n\r\n@CAM-Gerlach \r\n\r\n> In that case, as originally discussed on https://github.com/python/peps/pull/2656 and done for PEP 632 ([PEP-632](https://peps.python.org/632)), maybe we should drop Topic: Packaging from this PEP as well as the related PEP 477 ([PEP-477](https://peps.python.org/477)) ? I can do it in https://github.com/python/peps/pull/2690 since its closely related to the other changes in that PR (also, your input there on updating PEP 262 ([PEP-262](https://peps.python.org/262))'s status to reflect reality would be helpful).\r\n\r\n@pfmoore \r\n\r\n> I don't know on that. It's packaging related in the sense that it is about packaging tools, so if people are looking for things that are \"about\" packaging, it does belong in the Packaging topic. But it's not a \"PyPA specification\" in the sense defined [here](https://www.pypa.io/en/latest/specifications/), which is why I said that the normal PEP rules about modifications should apply, rather than the PyPA-specific process. Ultimately, I'm not actually sure what the rules are for when a PEP qualifies as being in the Packaging topic...\r\n\r\n@CAM-Gerlach \r\n\r\n> Yeah, exactly. That basically comes down to whether a Topic is considered more of a \"Category\" (i.e. the former), or a \"Track\" (i.e. 
the latter). I presented both alternatives at the beginning of the discussion, and both were favored at different points in its evolution; the initial consensus in the Discourse thread was something more like a \"Track\", but on the PR and naming of the \"Topic\" header that was eventually implemented (that allows multiple topics per PR), it ended up basically the latter, at least nominally.\r\n>\r\n> However, with PEP 632 ([PEP-632](https://peps.python.org/632)) being removed at the behest of @zooba in https://github.com/python/peps/pull/2636#discussion_r898173875 , given these two PEPs basically just concern adding/backporting a utility module from the core standard library rather than PyPA/PyPI standards, it seems to me to not make sense to keep those PEPs but not another very similar one about the deprecation and removal of what was the core packaging module. So, I suggest either removing those two, or adding back PEP 632.\r\n\r\n@pradyunsg \r\n\r\n> Alternatively, we can defer doing anything here until the authors have a concern with being listed under the packaging index as well or till we see/identify more instances of something like this? In any case, this discussion should move to a separate issue IMO.\r\n\r\n@zooba \r\n\r\n> I think this is in the same boat as PEP 632, and shouldn't be tagged as a packaging PEP.\r\n>\r\n> As a proposal of the rule I'd use, if we'd let the packaging delegate (i.e. Paul, right now) decide without going to python-dev or the SC, then it's a packaging PEP. Whether something is added to the standard library clearly falls outside of this scope, and so this PEP is standards track and not packaging.\r\n\r\n</details>\n", "before_files": [{"content": "\"\"\"Holds type and status constants for PEP 0 generation.\"\"\"\n\nSTATUS_ACCEPTED = \"Accepted\"\nSTATUS_ACTIVE = \"Active\"\nSTATUS_DEFERRED = \"Deferred\"\nSTATUS_DRAFT = \"Draft\"\nSTATUS_FINAL = \"Final\"\nSTATUS_PROVISIONAL = \"Provisional\"\nSTATUS_REJECTED = \"Rejected\"\nSTATUS_SUPERSEDED = \"Superseded\"\nSTATUS_WITHDRAWN = \"Withdrawn\"\n\n# Valid values for the Status header.\nSTATUS_VALUES = {\n STATUS_ACCEPTED, STATUS_PROVISIONAL, STATUS_REJECTED, STATUS_WITHDRAWN,\n STATUS_DEFERRED, STATUS_FINAL, STATUS_ACTIVE, STATUS_DRAFT, STATUS_SUPERSEDED,\n}\n# Map of invalid/special statuses to their valid counterparts\nSPECIAL_STATUSES = {\n \"April Fool!\": STATUS_REJECTED, # See PEP 401 :)\n}\n# Draft PEPs have no status displayed, Active shares a key with Accepted\nHIDE_STATUS = {STATUS_DRAFT, STATUS_ACTIVE}\n# Dead PEP statuses\nDEAD_STATUSES = {STATUS_REJECTED, STATUS_WITHDRAWN, STATUS_SUPERSEDED}\n\nTYPE_INFO = \"Informational\"\nTYPE_PROCESS = \"Process\"\nTYPE_STANDARDS = \"Standards Track\"\n\n# Valid values for the Type header.\nTYPE_VALUES = {TYPE_STANDARDS, TYPE_INFO, TYPE_PROCESS}\n# Active PEPs can only be for Informational or Process PEPs.\nACTIVE_ALLOWED = {TYPE_PROCESS, TYPE_INFO}\n\n# map of topic -> additional description\nSUBINDICES_BY_TOPIC = {\n \"packaging\": \"\"\"\\\nThe canonical, up-to-date packaging specifications can be found on the\n`Python Packaging Authority`_ (PyPA) `specifications`_ page.\nPackaging PEPs follow the `PyPA specification update process`_.\nThey are used to propose major additions or changes to the PyPA specifications.\n\n.. _Python Packaging Authority: https://www.pypa.io/\n.. _specifications: https://packaging.python.org/en/latest/specifications/\n.. 
_PyPA specification update process: https://www.pypa.io/en/latest/specifications/#specification-update-process\n\"\"\",\n \"release\": \"\"\"\\\nA PEP is written to specify the release cycle for each feature release of Python.\nSee the `developer's guide`_ for more information.\n\n.. _developer's guide: https://devguide.python.org/devcycle/\n\"\"\",\n \"typing\": \"\"\"\\\nMany recent PEPs propose changes to Python's static type system\nor otherwise relate to type annotations.\nThey are listed here for reference.\n\"\"\"\n}\n", "path": "pep_sphinx_extensions/pep_zero_generator/constants.py"}], "after_files": [{"content": "\"\"\"Holds type and status constants for PEP 0 generation.\"\"\"\n\nSTATUS_ACCEPTED = \"Accepted\"\nSTATUS_ACTIVE = \"Active\"\nSTATUS_DEFERRED = \"Deferred\"\nSTATUS_DRAFT = \"Draft\"\nSTATUS_FINAL = \"Final\"\nSTATUS_PROVISIONAL = \"Provisional\"\nSTATUS_REJECTED = \"Rejected\"\nSTATUS_SUPERSEDED = \"Superseded\"\nSTATUS_WITHDRAWN = \"Withdrawn\"\n\n# Valid values for the Status header.\nSTATUS_VALUES = {\n STATUS_ACCEPTED, STATUS_PROVISIONAL, STATUS_REJECTED, STATUS_WITHDRAWN,\n STATUS_DEFERRED, STATUS_FINAL, STATUS_ACTIVE, STATUS_DRAFT, STATUS_SUPERSEDED,\n}\n# Map of invalid/special statuses to their valid counterparts\nSPECIAL_STATUSES = {\n \"April Fool!\": STATUS_REJECTED, # See PEP 401 :)\n}\n# Draft PEPs have no status displayed, Active shares a key with Accepted\nHIDE_STATUS = {STATUS_DRAFT, STATUS_ACTIVE}\n# Dead PEP statuses\nDEAD_STATUSES = {STATUS_REJECTED, STATUS_WITHDRAWN, STATUS_SUPERSEDED}\n\nTYPE_INFO = \"Informational\"\nTYPE_PROCESS = \"Process\"\nTYPE_STANDARDS = \"Standards Track\"\n\n# Valid values for the Type header.\nTYPE_VALUES = {TYPE_STANDARDS, TYPE_INFO, TYPE_PROCESS}\n# Active PEPs can only be for Informational or Process PEPs.\nACTIVE_ALLOWED = {TYPE_PROCESS, TYPE_INFO}\n\n# map of topic -> additional description\nSUBINDICES_BY_TOPIC = {\n \"packaging\": \"\"\"\\\nPackaging PEPs follow the `PyPA specification update process`_.\nThey are used to propose major additions or changes to the PyPA specifications.\nThe canonical, up-to-date packaging specifications can be found on the\n`Python Packaging Authority`_ (PyPA) `specifications`_ page.\n\n.. _Python Packaging Authority: https://www.pypa.io/\n.. _specifications: https://packaging.python.org/en/latest/specifications/\n.. _PyPA specification update process: https://www.pypa.io/en/latest/specifications/#specification-update-process\n\"\"\",\n \"release\": \"\"\"\\\nA PEP is written to specify the release cycle for each feature release of Python.\nSee the `developer's guide`_ for more information.\n\n.. _developer's guide: https://devguide.python.org/devcycle/\n\"\"\",\n \"typing\": \"\"\"\\\nMany recent PEPs propose changes to Python's static type system\nor otherwise relate to type annotations.\nThey are listed here for reference.\n\"\"\"\n}\n", "path": "pep_sphinx_extensions/pep_zero_generator/constants.py"}]} | 1,988 | 226 |
gh_patches_debug_5833 | rasdani/github-patches | git_diff | cupy__cupy-499 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Different behaviour of argmax
I found that the behaviour of `argmax` differs between `numpy` and `cupy` if the array has `0` in its shape and the `axis` argument is used. I don't know whether this behaviour is intended or not.
```
np.empty((0, 1)).argmax(axis=1) # array([], dtype=int64)
cupy.empty((0, 1)).argmax(axis=1)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-9-a5737d72bcba> in <module>()
----> 1 cupy.empty((0, 1)).argmax(axis=1)
cupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17701)()
cupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17556)()
cupy/core/reduction.pxi in cupy.core.core.simple_reduction_function.__call__ (cupy/core/core.cpp:52697)()
ValueError: zero-size array to reduction operation cupy_argmax which has no identity
```
I used cupy 2.0.0a1.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/logic/truth.py`
Content:
```
1 def all(a, axis=None, out=None, keepdims=False):
2 # TODO(okuta): check type
3 return a.all(axis=axis, out=out, keepdims=keepdims)
4
5
6 def any(a, axis=None, out=None, keepdims=False):
7 # TODO(okuta): check type
8 return a.any(axis=axis, out=out, keepdims=keepdims)
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/logic/truth.py b/cupy/logic/truth.py
--- a/cupy/logic/truth.py
+++ b/cupy/logic/truth.py
@@ -1,8 +1,11 @@
+import cupy
+
+
def all(a, axis=None, out=None, keepdims=False):
- # TODO(okuta): check type
+ assert isinstance(a, cupy.ndarray)
return a.all(axis=axis, out=out, keepdims=keepdims)
def any(a, axis=None, out=None, keepdims=False):
- # TODO(okuta): check type
+ assert isinstance(a, cupy.ndarray)
return a.any(axis=axis, out=out, keepdims=keepdims)
| {"golden_diff": "diff --git a/cupy/logic/truth.py b/cupy/logic/truth.py\n--- a/cupy/logic/truth.py\n+++ b/cupy/logic/truth.py\n@@ -1,8 +1,11 @@\n+import cupy\n+\n+\n def all(a, axis=None, out=None, keepdims=False):\n- # TODO(okuta): check type\n+ assert isinstance(a, cupy.ndarray)\n return a.all(axis=axis, out=out, keepdims=keepdims)\n \n \n def any(a, axis=None, out=None, keepdims=False):\n- # TODO(okuta): check type\n+ assert isinstance(a, cupy.ndarray)\n return a.any(axis=axis, out=out, keepdims=keepdims)\n", "issue": "Different behaviour of argmax\nI found the behaviour of `argmax` is different between `numpy` and `cupy` if the array has `0` in its shape and `axis` argument is used. I don't know whether this behaviour is intended or not.\r\n\r\n```\r\nnp.empty((0, 1)).argmax(axis=1) # array([], dtype=int64)\r\ncupy.empty((0, 1)).argmax(axis=1)\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-9-a5737d72bcba> in <module>()\r\n----> 1 cupy.empty((0, 1)).argmax(axis=1)\r\n\r\ncupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17701)()\r\n\r\ncupy/core/core.pyx in cupy.core.core.ndarray.argmax (cupy/core/core.cpp:17556)()\r\n\r\ncupy/core/reduction.pxi in cupy.core.core.simple_reduction_function.__call__ (cupy/core/core.cpp:52697)()\r\n\r\nValueError: zero-size array to reduction operation cupy_argmax which has no identity\r\n```\r\n\r\nI used cupy 2.0.0a1.\n", "before_files": [{"content": "def all(a, axis=None, out=None, keepdims=False):\n # TODO(okuta): check type\n return a.all(axis=axis, out=out, keepdims=keepdims)\n\n\ndef any(a, axis=None, out=None, keepdims=False):\n # TODO(okuta): check type\n return a.any(axis=axis, out=out, keepdims=keepdims)\n", "path": "cupy/logic/truth.py"}], "after_files": [{"content": "import cupy\n\n\ndef all(a, axis=None, out=None, keepdims=False):\n assert isinstance(a, cupy.ndarray)\n return a.all(axis=axis, out=out, keepdims=keepdims)\n\n\ndef any(a, axis=None, out=None, keepdims=False):\n assert isinstance(a, cupy.ndarray)\n return a.any(axis=axis, out=out, keepdims=keepdims)\n", "path": "cupy/logic/truth.py"}]} | 621 | 161 |
gh_patches_debug_42424 | rasdani/github-patches | git_diff | python-discord__bot-1452 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Delete messages when `!pypi` module is invalid
When a user invokes the `!pypi` command with an invalid module, they currently get a response saying `"Package could not be found."`.
What I'm proposing is that after a delay (I believe we have `bot.constants.RedirectOutput.delete_delay` for this) the bot should delete both its response and the invocation message, like [`!d` currently does](https://github.com/python-discord/bot/blob/master/bot/exts/info/doc.py#L363-L365).
This would help reduce channel flooding when someone is trying to remember the name of a module, and since it's not a trivial change I'm happy to try and implement this myself if it's something we decide we want.
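A rough sketch of what that could look like in the 404 branch of `get_package_info` (whether to use a plain sleep or an existing deletion helper is an open question; this is only an illustration):
```
# inside `get_package_info`, replacing the plain `await ctx.send(embed=embed)` for the 404 case
error_message = await ctx.send(embed=embed)
await asyncio.sleep(RedirectOutput.delete_delay)  # RedirectOutput imported from bot.constants
with suppress(NotFound):                          # suppress from contextlib, NotFound from discord
    await ctx.message.delete()
    await error_message.delete()
```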
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/exts/info/pypi.py`
Content:
```
1 import itertools
2 import logging
3 import random
4 import re
5
6 from discord import Embed
7 from discord.ext.commands import Cog, Context, command
8 from discord.utils import escape_markdown
9
10 from bot.bot import Bot
11 from bot.constants import Colours, NEGATIVE_REPLIES
12
13 URL = "https://pypi.org/pypi/{package}/json"
14 FIELDS = ("author", "requires_python", "summary", "license")
15 PYPI_ICON = "https://cdn.discordapp.com/emojis/766274397257334814.png"
16
17 PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))
18
19 ILLEGAL_CHARACTERS = re.compile(r"[^a-zA-Z0-9-.]+")
20
21 log = logging.getLogger(__name__)
22
23
24 class PyPi(Cog):
25 """Cog for getting information about PyPi packages."""
26
27 def __init__(self, bot: Bot):
28 self.bot = bot
29
30 @command(name="pypi", aliases=("package", "pack"))
31 async def get_package_info(self, ctx: Context, package: str) -> None:
32 """Provide information about a specific package from PyPI."""
33 embed = Embed(
34 title=random.choice(NEGATIVE_REPLIES),
35 colour=Colours.soft_red
36 )
37 embed.set_thumbnail(url=PYPI_ICON)
38
39 if (character := re.search(ILLEGAL_CHARACTERS, package)) is not None:
40 embed.description = f"Illegal character passed into command: '{escape_markdown(character.group(0))}'"
41 await ctx.send(embed=embed)
42 return
43
44 async with self.bot.http_session.get(URL.format(package=package)) as response:
45 if response.status == 404:
46 embed.description = "Package could not be found."
47
48 elif response.status == 200 and response.content_type == "application/json":
49 response_json = await response.json()
50 info = response_json["info"]
51
52 embed.title = f"{info['name']} v{info['version']}"
53 embed.url = info['package_url']
54 embed.colour = next(PYPI_COLOURS)
55
56 for field in FIELDS:
57 field_data = info[field]
58
59 # Field could be completely empty, in some cases can be a string with whitespaces, or None.
60 if field_data and not field_data.isspace():
61 if '\n' in field_data and field == "license":
62 field_data = field_data.split('\n')[0]
63
64 embed.add_field(
65 name=field.replace("_", " ").title(),
66 value=escape_markdown(field_data),
67 inline=False,
68 )
69
70 else:
71 embed.description = "There was an error when fetching your PyPi package."
72 log.trace(f"Error when fetching PyPi package: {response.status}.")
73
74 await ctx.send(embed=embed)
75
76
77 def setup(bot: Bot) -> None:
78 """Load the PyPi cog."""
79 bot.add_cog(PyPi(bot))
80
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/exts/info/pypi.py b/bot/exts/info/pypi.py
--- a/bot/exts/info/pypi.py
+++ b/bot/exts/info/pypi.py
@@ -8,7 +8,7 @@
from discord.utils import escape_markdown
from bot.bot import Bot
-from bot.constants import Colours, NEGATIVE_REPLIES
+from bot.constants import Colours, NEGATIVE_REPLIES, RedirectOutput
URL = "https://pypi.org/pypi/{package}/json"
FIELDS = ("author", "requires_python", "summary", "license")
@@ -17,6 +17,7 @@
PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))
ILLEGAL_CHARACTERS = re.compile(r"[^a-zA-Z0-9-.]+")
+INVALID_INPUT_DELETE_DELAY = RedirectOutput.delete_delay
log = logging.getLogger(__name__)
@@ -36,42 +37,49 @@
)
embed.set_thumbnail(url=PYPI_ICON)
+ error = True
+
if (character := re.search(ILLEGAL_CHARACTERS, package)) is not None:
embed.description = f"Illegal character passed into command: '{escape_markdown(character.group(0))}'"
- await ctx.send(embed=embed)
- return
- async with self.bot.http_session.get(URL.format(package=package)) as response:
- if response.status == 404:
- embed.description = "Package could not be found."
+ else:
+ async with self.bot.http_session.get(URL.format(package=package)) as response:
+ if response.status == 404:
+ embed.description = "Package could not be found."
- elif response.status == 200 and response.content_type == "application/json":
- response_json = await response.json()
- info = response_json["info"]
+ elif response.status == 200 and response.content_type == "application/json":
+ response_json = await response.json()
+ info = response_json["info"]
- embed.title = f"{info['name']} v{info['version']}"
- embed.url = info['package_url']
- embed.colour = next(PYPI_COLOURS)
+ embed.title = f"{info['name']} v{info['version']}"
+ embed.url = info['package_url']
+ embed.colour = next(PYPI_COLOURS)
- for field in FIELDS:
- field_data = info[field]
+ for field in FIELDS:
+ field_data = info[field]
- # Field could be completely empty, in some cases can be a string with whitespaces, or None.
- if field_data and not field_data.isspace():
- if '\n' in field_data and field == "license":
- field_data = field_data.split('\n')[0]
+ # Field could be completely empty, in some cases can be a string with whitespaces, or None.
+ if field_data and not field_data.isspace():
+ if '\n' in field_data and field == "license":
+ field_data = field_data.split('\n')[0]
- embed.add_field(
- name=field.replace("_", " ").title(),
- value=escape_markdown(field_data),
- inline=False,
- )
+ embed.add_field(
+ name=field.replace("_", " ").title(),
+ value=escape_markdown(field_data),
+ inline=False,
+ )
- else:
- embed.description = "There was an error when fetching your PyPi package."
- log.trace(f"Error when fetching PyPi package: {response.status}.")
+ error = False
- await ctx.send(embed=embed)
+ else:
+ embed.description = "There was an error when fetching your PyPi package."
+ log.trace(f"Error when fetching PyPi package: {response.status}.")
+
+ if error:
+ await ctx.send(embed=embed, delete_after=INVALID_INPUT_DELETE_DELAY)
+ await ctx.message.delete(delay=INVALID_INPUT_DELETE_DELAY)
+ else:
+ await ctx.send(embed=embed)
def setup(bot: Bot) -> None:
| {"golden_diff": "diff --git a/bot/exts/info/pypi.py b/bot/exts/info/pypi.py\n--- a/bot/exts/info/pypi.py\n+++ b/bot/exts/info/pypi.py\n@@ -8,7 +8,7 @@\n from discord.utils import escape_markdown\n \n from bot.bot import Bot\n-from bot.constants import Colours, NEGATIVE_REPLIES\n+from bot.constants import Colours, NEGATIVE_REPLIES, RedirectOutput\n \n URL = \"https://pypi.org/pypi/{package}/json\"\n FIELDS = (\"author\", \"requires_python\", \"summary\", \"license\")\n@@ -17,6 +17,7 @@\n PYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))\n \n ILLEGAL_CHARACTERS = re.compile(r\"[^a-zA-Z0-9-.]+\")\n+INVALID_INPUT_DELETE_DELAY = RedirectOutput.delete_delay\n \n log = logging.getLogger(__name__)\n \n@@ -36,42 +37,49 @@\n )\n embed.set_thumbnail(url=PYPI_ICON)\n \n+ error = True\n+\n if (character := re.search(ILLEGAL_CHARACTERS, package)) is not None:\n embed.description = f\"Illegal character passed into command: '{escape_markdown(character.group(0))}'\"\n- await ctx.send(embed=embed)\n- return\n \n- async with self.bot.http_session.get(URL.format(package=package)) as response:\n- if response.status == 404:\n- embed.description = \"Package could not be found.\"\n+ else:\n+ async with self.bot.http_session.get(URL.format(package=package)) as response:\n+ if response.status == 404:\n+ embed.description = \"Package could not be found.\"\n \n- elif response.status == 200 and response.content_type == \"application/json\":\n- response_json = await response.json()\n- info = response_json[\"info\"]\n+ elif response.status == 200 and response.content_type == \"application/json\":\n+ response_json = await response.json()\n+ info = response_json[\"info\"]\n \n- embed.title = f\"{info['name']} v{info['version']}\"\n- embed.url = info['package_url']\n- embed.colour = next(PYPI_COLOURS)\n+ embed.title = f\"{info['name']} v{info['version']}\"\n+ embed.url = info['package_url']\n+ embed.colour = next(PYPI_COLOURS)\n \n- for field in FIELDS:\n- field_data = info[field]\n+ for field in FIELDS:\n+ field_data = info[field]\n \n- # Field could be completely empty, in some cases can be a string with whitespaces, or None.\n- if field_data and not field_data.isspace():\n- if '\\n' in field_data and field == \"license\":\n- field_data = field_data.split('\\n')[0]\n+ # Field could be completely empty, in some cases can be a string with whitespaces, or None.\n+ if field_data and not field_data.isspace():\n+ if '\\n' in field_data and field == \"license\":\n+ field_data = field_data.split('\\n')[0]\n \n- embed.add_field(\n- name=field.replace(\"_\", \" \").title(),\n- value=escape_markdown(field_data),\n- inline=False,\n- )\n+ embed.add_field(\n+ name=field.replace(\"_\", \" \").title(),\n+ value=escape_markdown(field_data),\n+ inline=False,\n+ )\n \n- else:\n- embed.description = \"There was an error when fetching your PyPi package.\"\n- log.trace(f\"Error when fetching PyPi package: {response.status}.\")\n+ error = False\n \n- await ctx.send(embed=embed)\n+ else:\n+ embed.description = \"There was an error when fetching your PyPi package.\"\n+ log.trace(f\"Error when fetching PyPi package: {response.status}.\")\n+\n+ if error:\n+ await ctx.send(embed=embed, delete_after=INVALID_INPUT_DELETE_DELAY)\n+ await ctx.message.delete(delay=INVALID_INPUT_DELETE_DELAY)\n+ else:\n+ await ctx.send(embed=embed)\n \n \n def setup(bot: Bot) -> None:\n", "issue": "Delete messages when `!pypi` module is invalid\nWhen users use the `!pypi` command with an invalid module, you currently get a response saying 
`\"Package could not be found.\"`. \r\n\r\nWhat I'm proposing it that after a delay (I believe we have `bot.constants.RedirectOutput.delete_delay` for this) the bot should delete both its response and the invocation message, like [`!d` currently does](https://github.com/python-discord/bot/blob/master/bot/exts/info/doc.py#L363-L365).\r\n\r\nThis would help reduce channel flooding when someone is trying to remember the name of a module, and since it's not a trivial change I'm happy to try and implement this myself if it's something we decide we want.\n", "before_files": [{"content": "import itertools\nimport logging\nimport random\nimport re\n\nfrom discord import Embed\nfrom discord.ext.commands import Cog, Context, command\nfrom discord.utils import escape_markdown\n\nfrom bot.bot import Bot\nfrom bot.constants import Colours, NEGATIVE_REPLIES\n\nURL = \"https://pypi.org/pypi/{package}/json\"\nFIELDS = (\"author\", \"requires_python\", \"summary\", \"license\")\nPYPI_ICON = \"https://cdn.discordapp.com/emojis/766274397257334814.png\"\n\nPYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))\n\nILLEGAL_CHARACTERS = re.compile(r\"[^a-zA-Z0-9-.]+\")\n\nlog = logging.getLogger(__name__)\n\n\nclass PyPi(Cog):\n \"\"\"Cog for getting information about PyPi packages.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @command(name=\"pypi\", aliases=(\"package\", \"pack\"))\n async def get_package_info(self, ctx: Context, package: str) -> None:\n \"\"\"Provide information about a specific package from PyPI.\"\"\"\n embed = Embed(\n title=random.choice(NEGATIVE_REPLIES),\n colour=Colours.soft_red\n )\n embed.set_thumbnail(url=PYPI_ICON)\n\n if (character := re.search(ILLEGAL_CHARACTERS, package)) is not None:\n embed.description = f\"Illegal character passed into command: '{escape_markdown(character.group(0))}'\"\n await ctx.send(embed=embed)\n return\n\n async with self.bot.http_session.get(URL.format(package=package)) as response:\n if response.status == 404:\n embed.description = \"Package could not be found.\"\n\n elif response.status == 200 and response.content_type == \"application/json\":\n response_json = await response.json()\n info = response_json[\"info\"]\n\n embed.title = f\"{info['name']} v{info['version']}\"\n embed.url = info['package_url']\n embed.colour = next(PYPI_COLOURS)\n\n for field in FIELDS:\n field_data = info[field]\n\n # Field could be completely empty, in some cases can be a string with whitespaces, or None.\n if field_data and not field_data.isspace():\n if '\\n' in field_data and field == \"license\":\n field_data = field_data.split('\\n')[0]\n\n embed.add_field(\n name=field.replace(\"_\", \" \").title(),\n value=escape_markdown(field_data),\n inline=False,\n )\n\n else:\n embed.description = \"There was an error when fetching your PyPi package.\"\n log.trace(f\"Error when fetching PyPi package: {response.status}.\")\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the PyPi cog.\"\"\"\n bot.add_cog(PyPi(bot))\n", "path": "bot/exts/info/pypi.py"}], "after_files": [{"content": "import itertools\nimport logging\nimport random\nimport re\n\nfrom discord import Embed\nfrom discord.ext.commands import Cog, Context, command\nfrom discord.utils import escape_markdown\n\nfrom bot.bot import Bot\nfrom bot.constants import Colours, NEGATIVE_REPLIES, RedirectOutput\n\nURL = \"https://pypi.org/pypi/{package}/json\"\nFIELDS = (\"author\", \"requires_python\", \"summary\", \"license\")\nPYPI_ICON = 
\"https://cdn.discordapp.com/emojis/766274397257334814.png\"\n\nPYPI_COLOURS = itertools.cycle((Colours.yellow, Colours.blue, Colours.white))\n\nILLEGAL_CHARACTERS = re.compile(r\"[^a-zA-Z0-9-.]+\")\nINVALID_INPUT_DELETE_DELAY = RedirectOutput.delete_delay\n\nlog = logging.getLogger(__name__)\n\n\nclass PyPi(Cog):\n \"\"\"Cog for getting information about PyPi packages.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @command(name=\"pypi\", aliases=(\"package\", \"pack\"))\n async def get_package_info(self, ctx: Context, package: str) -> None:\n \"\"\"Provide information about a specific package from PyPI.\"\"\"\n embed = Embed(\n title=random.choice(NEGATIVE_REPLIES),\n colour=Colours.soft_red\n )\n embed.set_thumbnail(url=PYPI_ICON)\n\n error = True\n\n if (character := re.search(ILLEGAL_CHARACTERS, package)) is not None:\n embed.description = f\"Illegal character passed into command: '{escape_markdown(character.group(0))}'\"\n\n else:\n async with self.bot.http_session.get(URL.format(package=package)) as response:\n if response.status == 404:\n embed.description = \"Package could not be found.\"\n\n elif response.status == 200 and response.content_type == \"application/json\":\n response_json = await response.json()\n info = response_json[\"info\"]\n\n embed.title = f\"{info['name']} v{info['version']}\"\n embed.url = info['package_url']\n embed.colour = next(PYPI_COLOURS)\n\n for field in FIELDS:\n field_data = info[field]\n\n # Field could be completely empty, in some cases can be a string with whitespaces, or None.\n if field_data and not field_data.isspace():\n if '\\n' in field_data and field == \"license\":\n field_data = field_data.split('\\n')[0]\n\n embed.add_field(\n name=field.replace(\"_\", \" \").title(),\n value=escape_markdown(field_data),\n inline=False,\n )\n\n error = False\n\n else:\n embed.description = \"There was an error when fetching your PyPi package.\"\n log.trace(f\"Error when fetching PyPi package: {response.status}.\")\n\n if error:\n await ctx.send(embed=embed, delete_after=INVALID_INPUT_DELETE_DELAY)\n await ctx.message.delete(delay=INVALID_INPUT_DELETE_DELAY)\n else:\n await ctx.send(embed=embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the PyPi cog.\"\"\"\n bot.add_cog(PyPi(bot))\n", "path": "bot/exts/info/pypi.py"}]} | 1,219 | 924 |
gh_patches_debug_58079 | rasdani/github-patches | git_diff | secondmind-labs__trieste-140 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Ensure type information is packaged with installable
**Describe the feature you'd like**
As a user, I want to be able to type check my code that calls into trieste. It's not clear at the moment if this is currently possible, given there is no py.typed file.
See the python PEP for more info https://www.python.org/dev/peps/pep-0561/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2020 The Trieste Contributors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from setuptools import find_packages, setup
16
17 with open("README.md", "r") as file:
18 long_description = file.read()
19
20 setup(
21 name="trieste",
22 version="0.3.1",
23 author="The Trieste contributors",
24 author_email="[email protected]",
25 description="A Bayesian optimization research toolbox built on TensorFlow",
26 long_description=long_description,
27 long_description_content_type="text/markdown",
28 url="https://github.com/secondmind-labs/trieste",
29 packages=find_packages(include=("trieste*",)),
30 classifiers=[
31 "Programming Language :: Python :: 3.7",
32 "License :: OSI Approved :: Apache Software License",
33 "Operating System :: OS Independent",
34 ],
35 python_requires="~=3.7",
36 install_requires=[
37 "absl-py",
38 "gpflow==2.1.*",
39 "numpy",
40 # tensorflow!=2.2.0,!=2.3.0 because of https://github.com/advisories/GHSA-8fxw-76px-3rxv
41 "tensorflow>=2.1,!=2.2.0,!=2.3.0",
42 "tensorflow-probability>=0.9",
43 ],
44 )
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -27,6 +27,9 @@
long_description_content_type="text/markdown",
url="https://github.com/secondmind-labs/trieste",
packages=find_packages(include=("trieste*",)),
+ package_data={
+ "trieste": ["py.typed"],
+ },
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -27,6 +27,9 @@\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/secondmind-labs/trieste\",\n packages=find_packages(include=(\"trieste*\",)),\n+ package_data={\n+ \"trieste\": [\"py.typed\"],\n+ },\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"License :: OSI Approved :: Apache Software License\",\n", "issue": "Ensure type information is packaged with installable\n**Describe the feature you'd like**\r\nAs a user, I want to be able to type check my code that calls into trieste. It's not clear at the moment if this is currently possible, given there is no py.typed file.\r\n\r\nSee the python PEP for more info https://www.python.org/dev/peps/pep-0561/\n", "before_files": [{"content": "# Copyright 2020 The Trieste Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as file:\n long_description = file.read()\n\nsetup(\n name=\"trieste\",\n version=\"0.3.1\",\n author=\"The Trieste contributors\",\n author_email=\"[email protected]\",\n description=\"A Bayesian optimization research toolbox built on TensorFlow\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/secondmind-labs/trieste\",\n packages=find_packages(include=(\"trieste*\",)),\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\"~=3.7\",\n install_requires=[\n \"absl-py\",\n \"gpflow==2.1.*\",\n \"numpy\",\n # tensorflow!=2.2.0,!=2.3.0 because of https://github.com/advisories/GHSA-8fxw-76px-3rxv\n \"tensorflow>=2.1,!=2.2.0,!=2.3.0\",\n \"tensorflow-probability>=0.9\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2020 The Trieste Contributors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as file:\n long_description = file.read()\n\nsetup(\n name=\"trieste\",\n version=\"0.3.1\",\n author=\"The Trieste contributors\",\n author_email=\"[email protected]\",\n description=\"A Bayesian optimization research toolbox built on TensorFlow\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/secondmind-labs/trieste\",\n 
packages=find_packages(include=(\"trieste*\",)),\n package_data={\n \"trieste\": [\"py.typed\"],\n },\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\"~=3.7\",\n install_requires=[\n \"absl-py\",\n \"gpflow==2.1.*\",\n \"numpy\",\n # tensorflow!=2.2.0,!=2.3.0 because of https://github.com/advisories/GHSA-8fxw-76px-3rxv\n \"tensorflow>=2.1,!=2.2.0,!=2.3.0\",\n \"tensorflow-probability>=0.9\",\n ],\n)\n", "path": "setup.py"}]} | 823 | 115 |
gh_patches_debug_10804 | rasdani/github-patches | git_diff | MycroftAI__mycroft-core-2985 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add note about legal issues when using gTTS in public or commercial projects
originally opened in the OVOS plugin repo
> I've just posted an issue about potential legal issues with this API in the gTTS repo: [pndurette/gTTS#309](https://github.com/pndurette/gTTS/issues/309)
>
> From the plugin description people will only understand that the service can break anytime but there are more risks. Imagine someone will build a system that goes into "production" with this plugin active (you know how people are ^^) and commercializes it. In the best case Google will just ask the owner to shut it down, in the worst case they will get sued :-/.
>
> Personally I don't think an undocumented and unstable API moving in a legal gray area should be included in a system called OpenVoiceOS. For the same reasons it is not part of SEPIA OpenAssistant Framework.
>
> That said I know first hand how hard it is to find an alternative that has comparable quality, is free (=self-hosted) and fast enough to run on Raspberry Pi etc., but I think services like gTTS are just a distraction on the path to become really "open"!
links:
https://github.com/OpenVoiceOS/ovos-tts-plugin-google-tx/issues/1
https://github.com/pndurette/gTTS/issues/309
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/tts/google_tts.py`
Content:
```
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from gtts import gTTS
16 from gtts.lang import tts_langs
17
18 from .tts import TTS, TTSValidator
19
20 from mycroft.util.log import LOG
21
22 # Live list of languages
23 # Cached list of supported languages (2021-02-09)
24 _default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',
25 'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',
26 'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',
27 'da': 'Danish', 'nl': 'Dutch', 'en': 'English',
28 'eo': 'Esperanto', 'et': 'Estonian', 'tl': 'Filipino',
29 'fi': 'Finnish', 'fr': 'French', 'de': 'German',
30 'el': 'Greek', 'gu': 'Gujarati', 'hi': 'Hindi',
31 'hu': 'Hungarian', 'is': 'Icelandic', 'id': 'Indonesian',
32 'it': 'Italian', 'ja': 'Japanese', 'jw': 'Javanese',
33 'kn': 'Kannada', 'km': 'Khmer', 'ko': 'Korean',
34 'la': 'Latin', 'lv': 'Latvian', 'mk': 'Macedonian',
35 'ml': 'Malayalam', 'mr': 'Marathi',
36 'my': 'Myanmar (Burmese)', 'ne': 'Nepali',
37 'no': 'Norwegian', 'pl': 'Polish', 'pt': 'Portuguese',
38 'ro': 'Romanian', 'ru': 'Russian', 'sr': 'Serbian',
39 'si': 'Sinhala', 'sk': 'Slovak', 'es': 'Spanish',
40 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',
41 'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai', 'tr': 'Turkish',
42 'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',
43 'cy': 'Welsh', 'zh': 'Chinese (Mandarin/China)'
44 }
45
46
47 _supported_langs = None
48
49
50 def get_supported_langs():
51 """Get dict of supported languages.
52
53 Tries to fetch remote list, if that fails a local cache will be used.
54
55 Returns:
56 (dict): Lang code to lang name map.
57 """
58 global _supported_langs
59 if not _supported_langs:
60 try:
61 _supported_langs = tts_langs()
62 except Exception:
63 LOG.warning('Couldn\'t fetch upto date language codes')
64 return _supported_langs or _default_langs
65
66
67 class GoogleTTS(TTS):
68 """Interface to google TTS."""
69 def __init__(self, lang, config):
70 self._google_lang = None
71 super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(
72 self), 'mp3')
73
74 @property
75 def google_lang(self):
76 """Property containing a converted language code suitable for gTTS."""
77 supported_langs = get_supported_langs()
78 if not self._google_lang:
79 if self.lang.lower() in supported_langs:
80 self._google_lang = self.lang.lower()
81 elif self.lang[:2].lower() in supported_langs:
82 self._google_lang = self.lang[:2]
83 return self._google_lang or self.lang.lower()
84
85 def get_tts(self, sentence, wav_file):
86 """Fetch tts audio using gTTS.
87
88 Args:
89 sentence (str): Sentence to generate audio for
90 wav_file (str): output file path
91 Returns:
92 Tuple ((str) written file, None)
93 """
94 tts = gTTS(text=sentence, lang=self.google_lang)
95 tts.save(wav_file)
96 return (wav_file, None) # No phonemes
97
98
99 class GoogleTTSValidator(TTSValidator):
100 def __init__(self, tts):
101 super(GoogleTTSValidator, self).__init__(tts)
102
103 def validate_lang(self):
104 lang = self.tts.google_lang
105 if lang.lower() not in get_supported_langs():
106 raise ValueError("Language not supported by gTTS: {}".format(lang))
107
108 def validate_connection(self):
109 try:
110 gTTS(text='Hi').save(self.tts.filename)
111 except Exception:
112 raise Exception(
113 'GoogleTTS server could not be verified. Please check your '
114 'internet connection.')
115
116 def get_tts_class(self):
117 return GoogleTTS
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py
--- a/mycroft/tts/google_tts.py
+++ b/mycroft/tts/google_tts.py
@@ -70,6 +70,13 @@
self._google_lang = None
super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(
self), 'mp3')
+ LOG.warning(
+ "The Google TTS module uses the gTTS Python package which itself "
+ "interfaces with the Google Translate text-to-speech API. This is "
+ "not intended for commercial or production usage. The service "
+ "may break at any time, and you are subject to their Terms of "
+ "Service that can be found at https://policies.google.com/terms"
+ )
@property
def google_lang(self):
| {"golden_diff": "diff --git a/mycroft/tts/google_tts.py b/mycroft/tts/google_tts.py\n--- a/mycroft/tts/google_tts.py\n+++ b/mycroft/tts/google_tts.py\n@@ -70,6 +70,13 @@\n self._google_lang = None\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n+ LOG.warning(\n+ \"The Google TTS module uses the gTTS Python package which itself \"\n+ \"interfaces with the Google Translate text-to-speech API. This is \"\n+ \"not intended for commercial or production usage. The service \"\n+ \"may break at any time, and you are subject to their Terms of \"\n+ \"Service that can be found at https://policies.google.com/terms\"\n+ )\n \n @property\n def google_lang(self):\n", "issue": "Add note about legal issues when using gTTS in public or commercial projects\noriginally opened in the OVOS plugin repo\r\n\r\n> I've just posted an issue about potential legal issues with this API in the gTTS repo: [pndurette/gTTS#309](https://github.com/pndurette/gTTS/issues/309)\r\n> \r\n> From the plugin description people will only understand that the service can break anytime but there are more risks. Imagine someone will build a system that goes into \"production\" with this plugin active (you know how people are ^^) and commercializes it. In the best case Google will just ask the owner to shut it down, in the worst case they will get sued :-/.\r\n> \r\n> Personally I don't think an undocumented and unstable API moving in a legal gray area should be included in a system called OpenVoiceOS. For the same reasons it is not part of SEPIA OpenAssistant Framework.\r\n> \r\n> That said I know first hand how hard it is to find an alternative that has comparable quality, is free (=self-hosted) and fast enough to run on Raspberry Pi etc., but I think services like gTTS are just a distraction on the path to become really \"open\"!\r\n\r\n\r\n\r\nlinks:\r\n\r\nhttps://github.com/OpenVoiceOS/ovos-tts-plugin-google-tx/issues/1\r\nhttps://github.com/pndurette/gTTS/issues/309\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom gtts import gTTS\nfrom gtts.lang import tts_langs\n\nfrom .tts import TTS, TTSValidator\n\nfrom mycroft.util.log import LOG\n\n# Live list of languages\n# Cached list of supported languages (2021-02-09)\n_default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',\n 'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',\n 'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',\n 'da': 'Danish', 'nl': 'Dutch', 'en': 'English',\n 'eo': 'Esperanto', 'et': 'Estonian', 'tl': 'Filipino',\n 'fi': 'Finnish', 'fr': 'French', 'de': 'German',\n 'el': 'Greek', 'gu': 'Gujarati', 'hi': 'Hindi',\n 'hu': 'Hungarian', 'is': 'Icelandic', 'id': 'Indonesian',\n 'it': 'Italian', 'ja': 'Japanese', 'jw': 'Javanese',\n 'kn': 'Kannada', 'km': 'Khmer', 'ko': 'Korean',\n 'la': 'Latin', 'lv': 'Latvian', 'mk': 'Macedonian',\n 'ml': 'Malayalam', 'mr': 'Marathi',\n 'my': 'Myanmar (Burmese)', 'ne': 'Nepali',\n 'no': 'Norwegian', 'pl': 
'Polish', 'pt': 'Portuguese',\n 'ro': 'Romanian', 'ru': 'Russian', 'sr': 'Serbian',\n 'si': 'Sinhala', 'sk': 'Slovak', 'es': 'Spanish',\n 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',\n 'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai', 'tr': 'Turkish',\n 'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',\n 'cy': 'Welsh', 'zh': 'Chinese (Mandarin/China)'\n }\n\n\n_supported_langs = None\n\n\ndef get_supported_langs():\n \"\"\"Get dict of supported languages.\n\n Tries to fetch remote list, if that fails a local cache will be used.\n\n Returns:\n (dict): Lang code to lang name map.\n \"\"\"\n global _supported_langs\n if not _supported_langs:\n try:\n _supported_langs = tts_langs()\n except Exception:\n LOG.warning('Couldn\\'t fetch upto date language codes')\n return _supported_langs or _default_langs\n\n\nclass GoogleTTS(TTS):\n \"\"\"Interface to google TTS.\"\"\"\n def __init__(self, lang, config):\n self._google_lang = None\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n\n @property\n def google_lang(self):\n \"\"\"Property containing a converted language code suitable for gTTS.\"\"\"\n supported_langs = get_supported_langs()\n if not self._google_lang:\n if self.lang.lower() in supported_langs:\n self._google_lang = self.lang.lower()\n elif self.lang[:2].lower() in supported_langs:\n self._google_lang = self.lang[:2]\n return self._google_lang or self.lang.lower()\n\n def get_tts(self, sentence, wav_file):\n \"\"\"Fetch tts audio using gTTS.\n\n Args:\n sentence (str): Sentence to generate audio for\n wav_file (str): output file path\n Returns:\n Tuple ((str) written file, None)\n \"\"\"\n tts = gTTS(text=sentence, lang=self.google_lang)\n tts.save(wav_file)\n return (wav_file, None) # No phonemes\n\n\nclass GoogleTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(GoogleTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n lang = self.tts.google_lang\n if lang.lower() not in get_supported_langs():\n raise ValueError(\"Language not supported by gTTS: {}\".format(lang))\n\n def validate_connection(self):\n try:\n gTTS(text='Hi').save(self.tts.filename)\n except Exception:\n raise Exception(\n 'GoogleTTS server could not be verified. 
Please check your '\n 'internet connection.')\n\n def get_tts_class(self):\n return GoogleTTS\n", "path": "mycroft/tts/google_tts.py"}], "after_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom gtts import gTTS\nfrom gtts.lang import tts_langs\n\nfrom .tts import TTS, TTSValidator\n\nfrom mycroft.util.log import LOG\n\n# Live list of languages\n# Cached list of supported languages (2021-02-09)\n_default_langs = {'af': 'Afrikaans', 'sq': 'Albanian', 'ar': 'Arabic',\n 'hy': 'Armenian', 'bn': 'Bengali', 'bs': 'Bosnian',\n 'ca': 'Catalan', 'hr': 'Croatian', 'cs': 'Czech',\n 'da': 'Danish', 'nl': 'Dutch', 'en': 'English',\n 'eo': 'Esperanto', 'et': 'Estonian', 'tl': 'Filipino',\n 'fi': 'Finnish', 'fr': 'French', 'de': 'German',\n 'el': 'Greek', 'gu': 'Gujarati', 'hi': 'Hindi',\n 'hu': 'Hungarian', 'is': 'Icelandic', 'id': 'Indonesian',\n 'it': 'Italian', 'ja': 'Japanese', 'jw': 'Javanese',\n 'kn': 'Kannada', 'km': 'Khmer', 'ko': 'Korean',\n 'la': 'Latin', 'lv': 'Latvian', 'mk': 'Macedonian',\n 'ml': 'Malayalam', 'mr': 'Marathi',\n 'my': 'Myanmar (Burmese)', 'ne': 'Nepali',\n 'no': 'Norwegian', 'pl': 'Polish', 'pt': 'Portuguese',\n 'ro': 'Romanian', 'ru': 'Russian', 'sr': 'Serbian',\n 'si': 'Sinhala', 'sk': 'Slovak', 'es': 'Spanish',\n 'su': 'Sundanese', 'sw': 'Swahili', 'sv': 'Swedish',\n 'ta': 'Tamil', 'te': 'Telugu', 'th': 'Thai', 'tr': 'Turkish',\n 'uk': 'Ukrainian', 'ur': 'Urdu', 'vi': 'Vietnamese',\n 'cy': 'Welsh', 'zh': 'Chinese (Mandarin/China)'\n }\n\n\n_supported_langs = None\n\n\ndef get_supported_langs():\n \"\"\"Get dict of supported languages.\n\n Tries to fetch remote list, if that fails a local cache will be used.\n\n Returns:\n (dict): Lang code to lang name map.\n \"\"\"\n global _supported_langs\n if not _supported_langs:\n try:\n _supported_langs = tts_langs()\n except Exception:\n LOG.warning('Couldn\\'t fetch upto date language codes')\n return _supported_langs or _default_langs\n\n\nclass GoogleTTS(TTS):\n \"\"\"Interface to google TTS.\"\"\"\n def __init__(self, lang, config):\n self._google_lang = None\n super(GoogleTTS, self).__init__(lang, config, GoogleTTSValidator(\n self), 'mp3')\n LOG.warning(\n \"The Google TTS module uses the gTTS Python package which itself \"\n \"interfaces with the Google Translate text-to-speech API. This is \"\n \"not intended for commercial or production usage. 
The service \"\n \"may break at any time, and you are subject to their Terms of \"\n \"Service that can be found at https://policies.google.com/terms\"\n )\n\n @property\n def google_lang(self):\n \"\"\"Property containing a converted language code suitable for gTTS.\"\"\"\n supported_langs = get_supported_langs()\n if not self._google_lang:\n if self.lang.lower() in supported_langs:\n self._google_lang = self.lang.lower()\n elif self.lang[:2].lower() in supported_langs:\n self._google_lang = self.lang[:2]\n return self._google_lang or self.lang.lower()\n\n def get_tts(self, sentence, wav_file):\n \"\"\"Fetch tts audio using gTTS.\n\n Args:\n sentence (str): Sentence to generate audio for\n wav_file (str): output file path\n Returns:\n Tuple ((str) written file, None)\n \"\"\"\n tts = gTTS(text=sentence, lang=self.google_lang)\n tts.save(wav_file)\n return (wav_file, None) # No phonemes\n\n\nclass GoogleTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(GoogleTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n lang = self.tts.google_lang\n if lang.lower() not in get_supported_langs():\n raise ValueError(\"Language not supported by gTTS: {}\".format(lang))\n\n def validate_connection(self):\n try:\n gTTS(text='Hi').save(self.tts.filename)\n except Exception:\n raise Exception(\n 'GoogleTTS server could not be verified. Please check your '\n 'internet connection.')\n\n def get_tts_class(self):\n return GoogleTTS\n", "path": "mycroft/tts/google_tts.py"}]} | 1,991 | 195 |
gh_patches_debug_27808 | rasdani/github-patches | git_diff | sopel-irc__sopel-2069 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support 2-pronoun sets in the pronoun module
### The problem
Setting pronouns in the pronouns module can be unintuitive because the bot accepts either a 1-pronoun "object" command (`.setpronouns she`) or a long 5-pronoun "subject/object/possessive-determiner/possessive-pronoun/reflexive" command (`.setpronouns she/her/her/hers/herself`). However, in my experience most people are used to the 2-pronoun convention of "subject/object" (`.setpronouns she/her`).
### The solution
Supporting 2-word version of pronouns.
### Alternatives
An alternative would be to improve the message for unrecognised pronoun sets by adding explicit instructions to use either 1- or 5-pronoun sets.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/pronouns.py`
Content:
```
1 # coding=utf-8
2 """
3 pronouns.py - Sopel Pronouns Plugin
4 Copyright © 2016, Elsie Powell
5 Licensed under the Eiffel Forum License 2.
6
7 https://sopel.chat
8 """
9 from __future__ import absolute_import, division, print_function, unicode_literals
10
11 from sopel import plugin
12
13
14 # Copied from pronoun.is, leaving a *lot* out. If
15 # https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that
16 # would be a lot easier.
17 KNOWN_SETS = {
18 'ze': 'ze/hir/hir/hirs/hirself',
19 'ze/hir': 'ze/hir/hir/hirs/hirself',
20 'ze/zir': 'ze/zir/zir/zirs/zirself',
21 'they': 'they/them/their/theirs/themselves',
22 'they/.../themselves': 'they/them/their/theirs/themselves',
23 'they/.../themself': 'they/them/their/theirs/themself',
24 'she': 'she/her/her/hers/herself',
25 'he': 'he/him/his/his/himself',
26 'xey': 'xey/xem/xyr/xyrs/xemself',
27 'sie': 'sie/hir/hir/hirs/hirself',
28 'it': 'it/it/its/its/itself',
29 'ey': 'ey/em/eir/eirs/eirslef',
30 }
31
32
33 @plugin.command('pronouns')
34 @plugin.example('.pronouns Embolalia')
35 def pronouns(bot, trigger):
36 """Show the pronouns for a given user, defaulting to the current user if left blank."""
37 if not trigger.group(3):
38 pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')
39 if pronouns:
40 say_pronouns(bot, trigger.nick, pronouns)
41 else:
42 bot.reply("I don't know your pronouns! You can set them with "
43 "{}setpronouns".format(bot.config.core.help_prefix))
44 else:
45 pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')
46 if pronouns:
47 say_pronouns(bot, trigger.group(3), pronouns)
48 elif trigger.group(3) == bot.nick:
49 # You can stuff an entry into the database manually for your bot's
50 # gender, but like… it's a bot.
51 bot.say(
52 "I am a bot. Beep boop. My pronouns are it/it/its/its/itself. "
53 "See https://pronoun.is/it for examples."
54 )
55 else:
56 bot.reply("I don't know {}'s pronouns. They can set them with "
57 "{}setpronouns".format(trigger.group(3),
58 bot.config.core.help_prefix))
59
60
61 def say_pronouns(bot, nick, pronouns):
62 for short, set_ in KNOWN_SETS.items():
63 if pronouns == set_:
64 break
65 short = pronouns
66
67 bot.say("{}'s pronouns are {}. See https://pronoun.is/{} for "
68 "examples.".format(nick, pronouns, short))
69
70
71 @plugin.command('setpronouns')
72 @plugin.example('.setpronouns they/them/their/theirs/themselves')
73 def set_pronouns(bot, trigger):
74 pronouns = trigger.group(2)
75 """Set your pronouns."""
76 if not pronouns:
77 bot.reply('What pronouns do you use?')
78 return
79
80 disambig = ''
81 if pronouns == 'they':
82 disambig = ' You can also use they/.../themself, if you prefer.'
83 pronouns = KNOWN_SETS.get(pronouns)
84 elif pronouns == 'ze':
85 disambig = ' I have ze/hir. If you meant ze/zir, you can use that instead.'
86 pronouns = KNOWN_SETS.get(pronouns)
87 elif len(pronouns.split('/')) != 5:
88 pronouns = KNOWN_SETS.get(pronouns)
89 if not pronouns:
90 bot.reply(
91 "I'm sorry, I don't know those pronouns. "
92 "You can give me a set I don't know by formatting it "
93 "subject/object/possessive-determiner/possessive-pronoun/"
94 "reflexive, as in: they/them/their/theirs/themselves"
95 )
96 return
97 bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)
98 bot.reply("Thanks for telling me!" + disambig)
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/modules/pronouns.py b/sopel/modules/pronouns.py
--- a/sopel/modules/pronouns.py
+++ b/sopel/modules/pronouns.py
@@ -19,14 +19,21 @@
'ze/hir': 'ze/hir/hir/hirs/hirself',
'ze/zir': 'ze/zir/zir/zirs/zirself',
'they': 'they/them/their/theirs/themselves',
+ 'they/them': 'they/them/their/theirs/themselves',
'they/.../themselves': 'they/them/their/theirs/themselves',
'they/.../themself': 'they/them/their/theirs/themself',
'she': 'she/her/her/hers/herself',
+ 'she/her': 'she/her/her/hers/herself',
'he': 'he/him/his/his/himself',
+ 'he/him': 'he/him/his/his/himself',
'xey': 'xey/xem/xyr/xyrs/xemself',
+ 'xey/xem': 'xey/xem/xyr/xyrs/xemself',
'sie': 'sie/hir/hir/hirs/hirself',
+ 'sie/hir': 'sie/hir/hir/hirs/hirself',
'it': 'it/it/its/its/itself',
- 'ey': 'ey/em/eir/eirs/eirslef',
+ 'it/it': 'it/it/its/its/itself',
+ 'ey': 'ey/em/eir/eirs/eirself',
+ 'ey/em': 'ey/em/eir/eirs/eirself',
}
@@ -71,8 +78,8 @@
@plugin.command('setpronouns')
@plugin.example('.setpronouns they/them/their/theirs/themselves')
def set_pronouns(bot, trigger):
- pronouns = trigger.group(2)
"""Set your pronouns."""
+ pronouns = trigger.group(2)
if not pronouns:
bot.reply('What pronouns do you use?')
return
| {"golden_diff": "diff --git a/sopel/modules/pronouns.py b/sopel/modules/pronouns.py\n--- a/sopel/modules/pronouns.py\n+++ b/sopel/modules/pronouns.py\n@@ -19,14 +19,21 @@\n 'ze/hir': 'ze/hir/hir/hirs/hirself',\n 'ze/zir': 'ze/zir/zir/zirs/zirself',\n 'they': 'they/them/their/theirs/themselves',\n+ 'they/them': 'they/them/their/theirs/themselves',\n 'they/.../themselves': 'they/them/their/theirs/themselves',\n 'they/.../themself': 'they/them/their/theirs/themself',\n 'she': 'she/her/her/hers/herself',\n+ 'she/her': 'she/her/her/hers/herself',\n 'he': 'he/him/his/his/himself',\n+ 'he/him': 'he/him/his/his/himself',\n 'xey': 'xey/xem/xyr/xyrs/xemself',\n+ 'xey/xem': 'xey/xem/xyr/xyrs/xemself',\n 'sie': 'sie/hir/hir/hirs/hirself',\n+ 'sie/hir': 'sie/hir/hir/hirs/hirself',\n 'it': 'it/it/its/its/itself',\n- 'ey': 'ey/em/eir/eirs/eirslef',\n+ 'it/it': 'it/it/its/its/itself',\n+ 'ey': 'ey/em/eir/eirs/eirself',\n+ 'ey/em': 'ey/em/eir/eirs/eirself',\n }\n \n \n@@ -71,8 +78,8 @@\n @plugin.command('setpronouns')\n @plugin.example('.setpronouns they/them/their/theirs/themselves')\n def set_pronouns(bot, trigger):\n- pronouns = trigger.group(2)\n \"\"\"Set your pronouns.\"\"\"\n+ pronouns = trigger.group(2)\n if not pronouns:\n bot.reply('What pronouns do you use?')\n return\n", "issue": "Support 2-pronoun sets in the pronoun module \n### The problem\r\n\r\nSetting pronouns in the pronouns module can be unintuitive because the bot accepts either a 1-pronoun \"object\" command (`.setpronouns she`) or a long 5-pronouns \"subject/object/possessive-determiner/possessive-pronoun/reflexive\" commnad (`.setpronouns she/her/her/hers/herself`). However, in my experience most people are used to the 2-pronoun convention of \"subject/object\" `.setpronouns she/her`). \r\n\r\n### The solution\r\n\r\nSupporting 2-word version of pronouns. \r\n\r\n### Alternatives\r\n\r\nAn alternative would be to improve the message for unrecognised pronoun sets by adding explicit instructions to use either 1- or 5-pronoun sets. \r\n\r\n\r\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\npronouns.py - Sopel Pronouns Plugin\nCopyright \u00a9 2016, Elsie Powell\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom sopel import plugin\n\n\n# Copied from pronoun.is, leaving a *lot* out. If\n# https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that\n# would be a lot easier.\nKNOWN_SETS = {\n 'ze': 'ze/hir/hir/hirs/hirself',\n 'ze/hir': 'ze/hir/hir/hirs/hirself',\n 'ze/zir': 'ze/zir/zir/zirs/zirself',\n 'they': 'they/them/their/theirs/themselves',\n 'they/.../themselves': 'they/them/their/theirs/themselves',\n 'they/.../themself': 'they/them/their/theirs/themself',\n 'she': 'she/her/her/hers/herself',\n 'he': 'he/him/his/his/himself',\n 'xey': 'xey/xem/xyr/xyrs/xemself',\n 'sie': 'sie/hir/hir/hirs/hirself',\n 'it': 'it/it/its/its/itself',\n 'ey': 'ey/em/eir/eirs/eirslef',\n}\n\n\[email protected]('pronouns')\[email protected]('.pronouns Embolalia')\ndef pronouns(bot, trigger):\n \"\"\"Show the pronouns for a given user, defaulting to the current user if left blank.\"\"\"\n if not trigger.group(3):\n pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.nick, pronouns)\n else:\n bot.reply(\"I don't know your pronouns! 
You can set them with \"\n \"{}setpronouns\".format(bot.config.core.help_prefix))\n else:\n pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.group(3), pronouns)\n elif trigger.group(3) == bot.nick:\n # You can stuff an entry into the database manually for your bot's\n # gender, but like\u2026 it's a bot.\n bot.say(\n \"I am a bot. Beep boop. My pronouns are it/it/its/its/itself. \"\n \"See https://pronoun.is/it for examples.\"\n )\n else:\n bot.reply(\"I don't know {}'s pronouns. They can set them with \"\n \"{}setpronouns\".format(trigger.group(3),\n bot.config.core.help_prefix))\n\n\ndef say_pronouns(bot, nick, pronouns):\n for short, set_ in KNOWN_SETS.items():\n if pronouns == set_:\n break\n short = pronouns\n\n bot.say(\"{}'s pronouns are {}. See https://pronoun.is/{} for \"\n \"examples.\".format(nick, pronouns, short))\n\n\[email protected]('setpronouns')\[email protected]('.setpronouns they/them/their/theirs/themselves')\ndef set_pronouns(bot, trigger):\n pronouns = trigger.group(2)\n \"\"\"Set your pronouns.\"\"\"\n if not pronouns:\n bot.reply('What pronouns do you use?')\n return\n\n disambig = ''\n if pronouns == 'they':\n disambig = ' You can also use they/.../themself, if you prefer.'\n pronouns = KNOWN_SETS.get(pronouns)\n elif pronouns == 'ze':\n disambig = ' I have ze/hir. If you meant ze/zir, you can use that instead.'\n pronouns = KNOWN_SETS.get(pronouns)\n elif len(pronouns.split('/')) != 5:\n pronouns = KNOWN_SETS.get(pronouns)\n if not pronouns:\n bot.reply(\n \"I'm sorry, I don't know those pronouns. \"\n \"You can give me a set I don't know by formatting it \"\n \"subject/object/possessive-determiner/possessive-pronoun/\"\n \"reflexive, as in: they/them/their/theirs/themselves\"\n )\n return\n bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)\n bot.reply(\"Thanks for telling me!\" + disambig)\n", "path": "sopel/modules/pronouns.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\npronouns.py - Sopel Pronouns Plugin\nCopyright \u00a9 2016, Elsie Powell\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom sopel import plugin\n\n\n# Copied from pronoun.is, leaving a *lot* out. 
If\n# https://github.com/witch-house/pronoun.is/pull/96 gets merged, using that\n# would be a lot easier.\nKNOWN_SETS = {\n 'ze': 'ze/hir/hir/hirs/hirself',\n 'ze/hir': 'ze/hir/hir/hirs/hirself',\n 'ze/zir': 'ze/zir/zir/zirs/zirself',\n 'they': 'they/them/their/theirs/themselves',\n 'they/them': 'they/them/their/theirs/themselves',\n 'they/.../themselves': 'they/them/their/theirs/themselves',\n 'they/.../themself': 'they/them/their/theirs/themself',\n 'she': 'she/her/her/hers/herself',\n 'she/her': 'she/her/her/hers/herself',\n 'he': 'he/him/his/his/himself',\n 'he/him': 'he/him/his/his/himself',\n 'xey': 'xey/xem/xyr/xyrs/xemself',\n 'xey/xem': 'xey/xem/xyr/xyrs/xemself',\n 'sie': 'sie/hir/hir/hirs/hirself',\n 'sie/hir': 'sie/hir/hir/hirs/hirself',\n 'it': 'it/it/its/its/itself',\n 'it/it': 'it/it/its/its/itself',\n 'ey': 'ey/em/eir/eirs/eirself',\n 'ey/em': 'ey/em/eir/eirs/eirself',\n}\n\n\[email protected]('pronouns')\[email protected]('.pronouns Embolalia')\ndef pronouns(bot, trigger):\n \"\"\"Show the pronouns for a given user, defaulting to the current user if left blank.\"\"\"\n if not trigger.group(3):\n pronouns = bot.db.get_nick_value(trigger.nick, 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.nick, pronouns)\n else:\n bot.reply(\"I don't know your pronouns! You can set them with \"\n \"{}setpronouns\".format(bot.config.core.help_prefix))\n else:\n pronouns = bot.db.get_nick_value(trigger.group(3), 'pronouns')\n if pronouns:\n say_pronouns(bot, trigger.group(3), pronouns)\n elif trigger.group(3) == bot.nick:\n # You can stuff an entry into the database manually for your bot's\n # gender, but like\u2026 it's a bot.\n bot.say(\n \"I am a bot. Beep boop. My pronouns are it/it/its/its/itself. \"\n \"See https://pronoun.is/it for examples.\"\n )\n else:\n bot.reply(\"I don't know {}'s pronouns. They can set them with \"\n \"{}setpronouns\".format(trigger.group(3),\n bot.config.core.help_prefix))\n\n\ndef say_pronouns(bot, nick, pronouns):\n for short, set_ in KNOWN_SETS.items():\n if pronouns == set_:\n break\n short = pronouns\n\n bot.say(\"{}'s pronouns are {}. See https://pronoun.is/{} for \"\n \"examples.\".format(nick, pronouns, short))\n\n\[email protected]('setpronouns')\[email protected]('.setpronouns they/them/their/theirs/themselves')\ndef set_pronouns(bot, trigger):\n \"\"\"Set your pronouns.\"\"\"\n pronouns = trigger.group(2)\n if not pronouns:\n bot.reply('What pronouns do you use?')\n return\n\n disambig = ''\n if pronouns == 'they':\n disambig = ' You can also use they/.../themself, if you prefer.'\n pronouns = KNOWN_SETS.get(pronouns)\n elif pronouns == 'ze':\n disambig = ' I have ze/hir. If you meant ze/zir, you can use that instead.'\n pronouns = KNOWN_SETS.get(pronouns)\n elif len(pronouns.split('/')) != 5:\n pronouns = KNOWN_SETS.get(pronouns)\n if not pronouns:\n bot.reply(\n \"I'm sorry, I don't know those pronouns. \"\n \"You can give me a set I don't know by formatting it \"\n \"subject/object/possessive-determiner/possessive-pronoun/\"\n \"reflexive, as in: they/them/their/theirs/themselves\"\n )\n return\n bot.db.set_nick_value(trigger.nick, 'pronouns', pronouns)\n bot.reply(\"Thanks for telling me!\" + disambig)\n", "path": "sopel/modules/pronouns.py"}]} | 1,649 | 496 |
gh_patches_debug_21787 | rasdani/github-patches | git_diff | mozilla__pontoon-3121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Warn on empty Fluent rich editor fields
> In addition to the bug, I believe this is a UX regression? We should show a warning that one of the values is empty when trying to save such message.
The current practice is to consider any content in a Fluent value or attribute to qualify the whole message as not-empty. The check for this was added in #2675. Previously empty attributes may indeed have resulted in errors, due to stringifying as invalid Fluent.
_Originally posted by @eemeli in https://github.com/mozilla/pontoon/issues/2973#issuecomment-1750344569_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pontoon/checks/libraries/pontoon_db.py`
Content:
```
1 import html
2 import re
3
4 import bleach
5
6 from collections import defaultdict
7 from fluent.syntax import FluentParser, ast
8 from fluent.syntax.visitor import Visitor
9
10 from pontoon.sync.formats.ftl import localizable_entries
11
12
13 MAX_LENGTH_RE = re.compile(r"MAX_LENGTH:( *)(\d+)", re.MULTILINE)
14 parser = FluentParser()
15
16
17 def get_max_length(comment):
18 """
19 Return max length value for an entity with MAX_LENTH.
20 """
21 max_length = re.findall(MAX_LENGTH_RE, comment or "")
22
23 if max_length:
24 return int(max_length[0][1])
25
26 return None
27
28
29 class IsEmptyVisitor(Visitor):
30 def __init__(self):
31 self.is_empty = True
32
33 def visit_Placeable(self, node):
34 if isinstance(node.expression, ast.Literal):
35 if node.expression.parse()["value"]:
36 self.is_empty = False
37 elif isinstance(node.expression, ast.SelectExpression):
38 self.generic_visit(node.expression)
39 else:
40 self.is_empty = False
41
42 def visit_TextElement(self, node):
43 if node.value:
44 self.is_empty = False
45
46
47 def run_checks(entity, original, string):
48 """
49 Group all checks related to the base UI that get stored in the DB
50 :arg pontoon.base.models.Entity entity: Source entity
51 :arg basestring original: an original string
52 :arg basestring string: a translation
53 """
54 checks = defaultdict(list)
55 resource_ext = entity.resource.format
56
57 if resource_ext == "lang":
58 # Newlines are not allowed in .lang files (bug 1190754)
59 if "\n" in string:
60 checks["pErrors"].append("Newline characters are not allowed")
61
62 # Prevent translations exceeding the given length limit
63 max_length = get_max_length(entity.comment)
64
65 if max_length:
66 string_length = len(
67 html.unescape(bleach.clean(string, strip=True, tags=()))
68 )
69
70 if string_length > max_length:
71 checks["pErrors"].append("Translation too long")
72
73 # Bug 1599056: Original and translation must either both end in a newline,
74 # or none of them should.
75 if resource_ext == "po":
76 if original.endswith("\n") != string.endswith("\n"):
77 checks["pErrors"].append("Ending newline mismatch")
78
79 # Prevent empty translation submissions if not supported
80 if string == "" and not entity.resource.allows_empty_translations:
81 checks["pErrors"].append("Empty translations are not allowed")
82
83 # FTL checks
84 if resource_ext == "ftl" and string != "":
85 translation_ast = parser.parse_entry(string)
86 entity_ast = parser.parse_entry(entity.string)
87
88 # Parse error
89 if isinstance(translation_ast, ast.Junk):
90 checks["pErrors"].append(translation_ast.annotations[0].message)
91
92 # Not a localizable entry
93 elif not isinstance(translation_ast, localizable_entries):
94 checks["pErrors"].append(
95 "Translation needs to be a valid localizable entry"
96 )
97
98 # Message ID mismatch
99 elif entity_ast.id.name != translation_ast.id.name:
100 checks["pErrors"].append("Translation key needs to match source string key")
101
102 # Empty translation entry warning; set here rather than pontoon_non_db.py
103 # to avoid needing to parse the Fluent message twice.
104 else:
105 visitor = IsEmptyVisitor()
106 visitor.visit(translation_ast)
107 if visitor.is_empty:
108 checks["pndbWarnings"].append("Empty translation")
109
110 return checks
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pontoon/checks/libraries/pontoon_db.py b/pontoon/checks/libraries/pontoon_db.py
--- a/pontoon/checks/libraries/pontoon_db.py
+++ b/pontoon/checks/libraries/pontoon_db.py
@@ -28,20 +28,27 @@
class IsEmptyVisitor(Visitor):
def __init__(self):
- self.is_empty = True
+ self.is_empty = False
+ self.is_pattern_empty = True
+
+ def visit_Pattern(self, node):
+ self.is_pattern_empty = True
+ self.visit(node.elements)
+ if self.is_pattern_empty:
+ self.is_empty = True
def visit_Placeable(self, node):
if isinstance(node.expression, ast.Literal):
if node.expression.parse()["value"]:
- self.is_empty = False
+ self.is_pattern_empty = False
elif isinstance(node.expression, ast.SelectExpression):
self.generic_visit(node.expression)
else:
- self.is_empty = False
+ self.is_pattern_empty = False
def visit_TextElement(self, node):
if node.value:
- self.is_empty = False
+ self.is_pattern_empty = False
def run_checks(entity, original, string):
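A quick sanity check of what the patched visitor is aiming for (a sketch only; it assumes a Pontoon checkout with the change above applied, and the message text is illustrative):

```python
from fluent.syntax import FluentParser
from pontoon.checks.libraries.pontoon_db import IsEmptyVisitor

msg = FluentParser().parse_entry('key = Value\n    .title = { "" }\n')
visitor = IsEmptyVisitor()
visitor.visit(msg)
print(visitor.is_empty)  # True after the patch: the attribute's empty pattern is flagged
```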
| {"golden_diff": "diff --git a/pontoon/checks/libraries/pontoon_db.py b/pontoon/checks/libraries/pontoon_db.py\n--- a/pontoon/checks/libraries/pontoon_db.py\n+++ b/pontoon/checks/libraries/pontoon_db.py\n@@ -28,20 +28,27 @@\n \n class IsEmptyVisitor(Visitor):\n def __init__(self):\n- self.is_empty = True\n+ self.is_empty = False\n+ self.is_pattern_empty = True\n+\n+ def visit_Pattern(self, node):\n+ self.is_pattern_empty = True\n+ self.visit(node.elements)\n+ if self.is_pattern_empty:\n+ self.is_empty = True\n \n def visit_Placeable(self, node):\n if isinstance(node.expression, ast.Literal):\n if node.expression.parse()[\"value\"]:\n- self.is_empty = False\n+ self.is_pattern_empty = False\n elif isinstance(node.expression, ast.SelectExpression):\n self.generic_visit(node.expression)\n else:\n- self.is_empty = False\n+ self.is_pattern_empty = False\n \n def visit_TextElement(self, node):\n if node.value:\n- self.is_empty = False\n+ self.is_pattern_empty = False\n \n \n def run_checks(entity, original, string):\n", "issue": "Warn on empty Fluent rich editor fields\n> In addition to the bug, I believe this is a UX regression? We should show a warning that one of the values is empty when trying to save such message.\r\n\r\nThe current practice is to consider any content in a Fluent value or attribute to qualify the whole message as not-empty. The check for this was added in #2675. Previously empty attributes may indeed have resulted in errors, due to stringifying as invalid Fluent.\r\n\r\n_Originally posted by @eemeli in https://github.com/mozilla/pontoon/issues/2973#issuecomment-1750344569_\r\n \n", "before_files": [{"content": "import html\nimport re\n\nimport bleach\n\nfrom collections import defaultdict\nfrom fluent.syntax import FluentParser, ast\nfrom fluent.syntax.visitor import Visitor\n\nfrom pontoon.sync.formats.ftl import localizable_entries\n\n\nMAX_LENGTH_RE = re.compile(r\"MAX_LENGTH:( *)(\\d+)\", re.MULTILINE)\nparser = FluentParser()\n\n\ndef get_max_length(comment):\n \"\"\"\n Return max length value for an entity with MAX_LENTH.\n \"\"\"\n max_length = re.findall(MAX_LENGTH_RE, comment or \"\")\n\n if max_length:\n return int(max_length[0][1])\n\n return None\n\n\nclass IsEmptyVisitor(Visitor):\n def __init__(self):\n self.is_empty = True\n\n def visit_Placeable(self, node):\n if isinstance(node.expression, ast.Literal):\n if node.expression.parse()[\"value\"]:\n self.is_empty = False\n elif isinstance(node.expression, ast.SelectExpression):\n self.generic_visit(node.expression)\n else:\n self.is_empty = False\n\n def visit_TextElement(self, node):\n if node.value:\n self.is_empty = False\n\n\ndef run_checks(entity, original, string):\n \"\"\"\n Group all checks related to the base UI that get stored in the DB\n :arg pontoon.base.models.Entity entity: Source entity\n :arg basestring original: an original string\n :arg basestring string: a translation\n \"\"\"\n checks = defaultdict(list)\n resource_ext = entity.resource.format\n\n if resource_ext == \"lang\":\n # Newlines are not allowed in .lang files (bug 1190754)\n if \"\\n\" in string:\n checks[\"pErrors\"].append(\"Newline characters are not allowed\")\n\n # Prevent translations exceeding the given length limit\n max_length = get_max_length(entity.comment)\n\n if max_length:\n string_length = len(\n html.unescape(bleach.clean(string, strip=True, tags=()))\n )\n\n if string_length > max_length:\n checks[\"pErrors\"].append(\"Translation too long\")\n\n # Bug 1599056: Original and translation must either both end in a 
newline,\n # or none of them should.\n if resource_ext == \"po\":\n if original.endswith(\"\\n\") != string.endswith(\"\\n\"):\n checks[\"pErrors\"].append(\"Ending newline mismatch\")\n\n # Prevent empty translation submissions if not supported\n if string == \"\" and not entity.resource.allows_empty_translations:\n checks[\"pErrors\"].append(\"Empty translations are not allowed\")\n\n # FTL checks\n if resource_ext == \"ftl\" and string != \"\":\n translation_ast = parser.parse_entry(string)\n entity_ast = parser.parse_entry(entity.string)\n\n # Parse error\n if isinstance(translation_ast, ast.Junk):\n checks[\"pErrors\"].append(translation_ast.annotations[0].message)\n\n # Not a localizable entry\n elif not isinstance(translation_ast, localizable_entries):\n checks[\"pErrors\"].append(\n \"Translation needs to be a valid localizable entry\"\n )\n\n # Message ID mismatch\n elif entity_ast.id.name != translation_ast.id.name:\n checks[\"pErrors\"].append(\"Translation key needs to match source string key\")\n\n # Empty translation entry warning; set here rather than pontoon_non_db.py\n # to avoid needing to parse the Fluent message twice.\n else:\n visitor = IsEmptyVisitor()\n visitor.visit(translation_ast)\n if visitor.is_empty:\n checks[\"pndbWarnings\"].append(\"Empty translation\")\n\n return checks\n", "path": "pontoon/checks/libraries/pontoon_db.py"}], "after_files": [{"content": "import html\nimport re\n\nimport bleach\n\nfrom collections import defaultdict\nfrom fluent.syntax import FluentParser, ast\nfrom fluent.syntax.visitor import Visitor\n\nfrom pontoon.sync.formats.ftl import localizable_entries\n\n\nMAX_LENGTH_RE = re.compile(r\"MAX_LENGTH:( *)(\\d+)\", re.MULTILINE)\nparser = FluentParser()\n\n\ndef get_max_length(comment):\n \"\"\"\n Return max length value for an entity with MAX_LENTH.\n \"\"\"\n max_length = re.findall(MAX_LENGTH_RE, comment or \"\")\n\n if max_length:\n return int(max_length[0][1])\n\n return None\n\n\nclass IsEmptyVisitor(Visitor):\n def __init__(self):\n self.is_empty = False\n self.is_pattern_empty = True\n\n def visit_Pattern(self, node):\n self.is_pattern_empty = True\n self.visit(node.elements)\n if self.is_pattern_empty:\n self.is_empty = True\n\n def visit_Placeable(self, node):\n if isinstance(node.expression, ast.Literal):\n if node.expression.parse()[\"value\"]:\n self.is_pattern_empty = False\n elif isinstance(node.expression, ast.SelectExpression):\n self.generic_visit(node.expression)\n else:\n self.is_pattern_empty = False\n\n def visit_TextElement(self, node):\n if node.value:\n self.is_pattern_empty = False\n\n\ndef run_checks(entity, original, string):\n \"\"\"\n Group all checks related to the base UI that get stored in the DB\n :arg pontoon.base.models.Entity entity: Source entity\n :arg basestring original: an original string\n :arg basestring string: a translation\n \"\"\"\n checks = defaultdict(list)\n resource_ext = entity.resource.format\n\n if resource_ext == \"lang\":\n # Newlines are not allowed in .lang files (bug 1190754)\n if \"\\n\" in string:\n checks[\"pErrors\"].append(\"Newline characters are not allowed\")\n\n # Prevent translations exceeding the given length limit\n max_length = get_max_length(entity.comment)\n\n if max_length:\n string_length = len(\n html.unescape(bleach.clean(string, strip=True, tags=()))\n )\n\n if string_length > max_length:\n checks[\"pErrors\"].append(\"Translation too long\")\n\n # Bug 1599056: Original and translation must either both end in a newline,\n # or none of them should.\n if 
resource_ext == \"po\":\n if original.endswith(\"\\n\") != string.endswith(\"\\n\"):\n checks[\"pErrors\"].append(\"Ending newline mismatch\")\n\n # Prevent empty translation submissions if not supported\n if string == \"\" and not entity.resource.allows_empty_translations:\n checks[\"pErrors\"].append(\"Empty translations are not allowed\")\n\n # FTL checks\n if resource_ext == \"ftl\" and string != \"\":\n translation_ast = parser.parse_entry(string)\n entity_ast = parser.parse_entry(entity.string)\n\n # Parse error\n if isinstance(translation_ast, ast.Junk):\n checks[\"pErrors\"].append(translation_ast.annotations[0].message)\n\n # Not a localizable entry\n elif not isinstance(translation_ast, localizable_entries):\n checks[\"pErrors\"].append(\n \"Translation needs to be a valid localizable entry\"\n )\n\n # Message ID mismatch\n elif entity_ast.id.name != translation_ast.id.name:\n checks[\"pErrors\"].append(\"Translation key needs to match source string key\")\n\n # Empty translation entry warning; set here rather than pontoon_non_db.py\n # to avoid needing to parse the Fluent message twice.\n else:\n visitor = IsEmptyVisitor()\n visitor.visit(translation_ast)\n if visitor.is_empty:\n checks[\"pndbWarnings\"].append(\"Empty translation\")\n\n return checks\n", "path": "pontoon/checks/libraries/pontoon_db.py"}]} | 1,400 | 280 |
gh_patches_debug_10412 | rasdani/github-patches | git_diff | keras-team__keras-19382 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CenterCrop.compute_output_shape() does not consider the list compatibility
When the input is a list of tensors, CenterCrop correctly generates the output, while `CenterCrop.compute_output_shape()` generates a wrong shape. Please refer to this [gist](https://colab.research.google.com/drive/1JLuEr0-dNCmawTb2dRX7kaSxOKGZ6Obv?usp=sharing).
Interestingly, the test code in [center_crop_test.py](https://github.com/keras-team/keras/blob/master/keras/layers/preprocessing/center_crop_test.py#L178) already tests list compatibility, while its `compute_output_shape` function misses it.
--- END ISSUE ---
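To make the mismatch concrete, a rough repro sketch (it assumes Keras 3 as in the linked gist; the printed values come from the gist and from reading `compute_output_shape`, they were not re-verified here):

```python
import numpy as np
import keras

layer = keras.layers.CenterCrop(height=2, width=2)
images = [np.zeros((4, 4, 3), "float32"), np.zeros((4, 4, 3), "float32")]
print(layer(images).shape)  # call() accepts the list and crops every image
print(layer.compute_output_shape([(4, 4, 3), (4, 4, 3)]))  # mangled shape before the fix
```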
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/layers/preprocessing/center_crop.py`
Content:
```
1 from keras import backend
2 from keras.api_export import keras_export
3 from keras.layers.preprocessing.tf_data_layer import TFDataLayer
4 from keras.utils import image_utils
5
6
7 @keras_export("keras.layers.CenterCrop")
8 class CenterCrop(TFDataLayer):
9 """A preprocessing layer which crops images.
10
11 This layers crops the central portion of the images to a target size. If an
12 image is smaller than the target size, it will be resized and cropped
13 so as to return the largest possible window in the image that matches
14 the target aspect ratio.
15
16 Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).
17
18 Input shape:
19 3D (unbatched) or 4D (batched) tensor with shape:
20 `(..., height, width, channels)`, in `"channels_last"` format,
21 or `(..., channels, height, width)`, in `"channels_first"` format.
22
23 Output shape:
24 3D (unbatched) or 4D (batched) tensor with shape:
25 `(..., target_height, target_width, channels)`,
26 or `(..., channels, target_height, target_width)`,
27 in `"channels_first"` format.
28
29 If the input height/width is even and the target height/width is odd (or
30 inversely), the input image is left-padded by 1 pixel.
31
32 **Note:** This layer is safe to use inside a `tf.data` pipeline
33 (independently of which backend you're using).
34
35 Args:
36 height: Integer, the height of the output shape.
37 width: Integer, the width of the output shape.
38 data_format: string, either `"channels_last"` or `"channels_first"`.
39 The ordering of the dimensions in the inputs. `"channels_last"`
40 corresponds to inputs with shape `(batch, height, width, channels)`
41 while `"channels_first"` corresponds to inputs with shape
42 `(batch, channels, height, width)`. It defaults to the
43 `image_data_format` value found in your Keras config file at
44 `~/.keras/keras.json`. If you never set it, then it will be
45 `"channels_last"`.
46 """
47
48 def __init__(self, height, width, data_format=None, **kwargs):
49 super().__init__(**kwargs)
50 self.height = height
51 self.width = width
52 self.data_format = backend.standardize_data_format(data_format)
53
54 def call(self, inputs):
55 inputs = self.backend.cast(inputs, self.compute_dtype)
56 if self.data_format == "channels_first":
57 init_height = inputs.shape[-2]
58 init_width = inputs.shape[-1]
59 else:
60 init_height = inputs.shape[-3]
61 init_width = inputs.shape[-2]
62
63 if init_height is None or init_width is None:
64 # Dynamic size case. TODO.
65 raise ValueError(
66 "At this time, CenterCrop can only "
67 "process images with a static spatial "
68 f"shape. Received: inputs.shape={inputs.shape}"
69 )
70
71 h_diff = init_height - self.height
72 w_diff = init_width - self.width
73
74 h_start = int(h_diff / 2)
75 w_start = int(w_diff / 2)
76
77 if h_diff >= 0 and w_diff >= 0:
78 if len(inputs.shape) == 4:
79 if self.data_format == "channels_first":
80 return inputs[
81 :,
82 :,
83 h_start : h_start + self.height,
84 w_start : w_start + self.width,
85 ]
86 return inputs[
87 :,
88 h_start : h_start + self.height,
89 w_start : w_start + self.width,
90 :,
91 ]
92 elif len(inputs.shape) == 3:
93 if self.data_format == "channels_first":
94 return inputs[
95 :,
96 h_start : h_start + self.height,
97 w_start : w_start + self.width,
98 ]
99 return inputs[
100 h_start : h_start + self.height,
101 w_start : w_start + self.width,
102 :,
103 ]
104
105 return image_utils.smart_resize(
106 inputs,
107 [self.height, self.width],
108 data_format=self.data_format,
109 backend_module=self.backend,
110 )
111
112 def compute_output_shape(self, input_shape):
113 input_shape = list(input_shape)
114 if len(input_shape) == 4:
115 if self.data_format == "channels_last":
116 input_shape[1] = self.height
117 input_shape[2] = self.width
118 else:
119 input_shape[2] = self.height
120 input_shape[3] = self.width
121 else:
122 if self.data_format == "channels_last":
123 input_shape[0] = self.height
124 input_shape[1] = self.width
125 else:
126 input_shape[1] = self.height
127 input_shape[2] = self.width
128 return tuple(input_shape)
129
130 def get_config(self):
131 base_config = super().get_config()
132 config = {
133 "height": self.height,
134 "width": self.width,
135 "data_format": self.data_format,
136 }
137 return {**base_config, **config}
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/keras/layers/preprocessing/center_crop.py b/keras/layers/preprocessing/center_crop.py
--- a/keras/layers/preprocessing/center_crop.py
+++ b/keras/layers/preprocessing/center_crop.py
@@ -111,6 +111,13 @@
def compute_output_shape(self, input_shape):
input_shape = list(input_shape)
+ if isinstance(input_shape[0], (list, tuple)) or len(
+ input_shape
+ ) not in (3, 4):
+ raise ValueError(
+ "`input_shape` must be a non-nested tuple or list "
+ "of rank-1 with size 3 (unbatched) or 4 (batched). "
+ )
if len(input_shape) == 4:
if self.data_format == "channels_last":
input_shape[1] = self.height
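With the patch applied, the nested-shape input from the issue should fail fast instead of silently returning a bogus shape (sketch, same Keras 3 assumption as above):

```python
import keras

layer = keras.layers.CenterCrop(height=2, width=2)
try:
    layer.compute_output_shape([(4, 4, 3), (4, 4, 3)])
except ValueError as exc:
    print(exc)  # "`input_shape` must be a non-nested tuple or list of rank-1 ..."
```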
| {"golden_diff": "diff --git a/keras/layers/preprocessing/center_crop.py b/keras/layers/preprocessing/center_crop.py\n--- a/keras/layers/preprocessing/center_crop.py\n+++ b/keras/layers/preprocessing/center_crop.py\n@@ -111,6 +111,13 @@\n \n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n+ if isinstance(input_shape[0], (list, tuple)) or len(\n+ input_shape\n+ ) not in (3, 4):\n+ raise ValueError(\n+ \"`input_shape` must be a non-nested tuple or list \"\n+ \"of rank-1 with size 3 (unbatched) or 4 (batched). \"\n+ )\n if len(input_shape) == 4:\n if self.data_format == \"channels_last\":\n input_shape[1] = self.height\n", "issue": "CenterCrop.compute_output_shape() does not consider the list compatibility\nWhen the input is a list of tensor, CenterCrop can correctly generate the output, while` CenterCrop.compute_output_shape()` will generate a wrong shape. Please refer to this [gist](https://colab.research.google.com/drive/1JLuEr0-dNCmawTb2dRX7kaSxOKGZ6Obv?usp=sharing).\r\n\r\nInterestingly, I found the test code in [center_crop_test.py, ](https://github.com/keras-team/keras/blob/master/keras/layers/preprocessing/center_crop_test.py#L178) has tested the list compatibility, while its `compute_output_shape` function missed it.\n", "before_files": [{"content": "from keras import backend\nfrom keras.api_export import keras_export\nfrom keras.layers.preprocessing.tf_data_layer import TFDataLayer\nfrom keras.utils import image_utils\n\n\n@keras_export(\"keras.layers.CenterCrop\")\nclass CenterCrop(TFDataLayer):\n \"\"\"A preprocessing layer which crops images.\n\n This layers crops the central portion of the images to a target size. If an\n image is smaller than the target size, it will be resized and cropped\n so as to return the largest possible window in the image that matches\n the target aspect ratio.\n\n Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format,\n or `(..., channels, height, width)`, in `\"channels_first\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., target_height, target_width, channels)`,\n or `(..., channels, target_height, target_width)`,\n in `\"channels_first\"` format.\n\n If the input height/width is even and the target height/width is odd (or\n inversely), the input image is left-padded by 1 pixel.\n\n **Note:** This layer is safe to use inside a `tf.data` pipeline\n (independently of which backend you're using).\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. 
If you never set it, then it will be\n `\"channels_last\"`.\n \"\"\"\n\n def __init__(self, height, width, data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.height = height\n self.width = width\n self.data_format = backend.standardize_data_format(data_format)\n\n def call(self, inputs):\n inputs = self.backend.cast(inputs, self.compute_dtype)\n if self.data_format == \"channels_first\":\n init_height = inputs.shape[-2]\n init_width = inputs.shape[-1]\n else:\n init_height = inputs.shape[-3]\n init_width = inputs.shape[-2]\n\n if init_height is None or init_width is None:\n # Dynamic size case. TODO.\n raise ValueError(\n \"At this time, CenterCrop can only \"\n \"process images with a static spatial \"\n f\"shape. Received: inputs.shape={inputs.shape}\"\n )\n\n h_diff = init_height - self.height\n w_diff = init_width - self.width\n\n h_start = int(h_diff / 2)\n w_start = int(w_diff / 2)\n\n if h_diff >= 0 and w_diff >= 0:\n if len(inputs.shape) == 4:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n elif len(inputs.shape) == 3:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n\n return image_utils.smart_resize(\n inputs,\n [self.height, self.width],\n data_format=self.data_format,\n backend_module=self.backend,\n )\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n if len(input_shape) == 4:\n if self.data_format == \"channels_last\":\n input_shape[1] = self.height\n input_shape[2] = self.width\n else:\n input_shape[2] = self.height\n input_shape[3] = self.width\n else:\n if self.data_format == \"channels_last\":\n input_shape[0] = self.height\n input_shape[1] = self.width\n else:\n input_shape[1] = self.height\n input_shape[2] = self.width\n return tuple(input_shape)\n\n def get_config(self):\n base_config = super().get_config()\n config = {\n \"height\": self.height,\n \"width\": self.width,\n \"data_format\": self.data_format,\n }\n return {**base_config, **config}\n", "path": "keras/layers/preprocessing/center_crop.py"}], "after_files": [{"content": "from keras import backend\nfrom keras.api_export import keras_export\nfrom keras.layers.preprocessing.tf_data_layer import TFDataLayer\nfrom keras.utils import image_utils\n\n\n@keras_export(\"keras.layers.CenterCrop\")\nclass CenterCrop(TFDataLayer):\n \"\"\"A preprocessing layer which crops images.\n\n This layers crops the central portion of the images to a target size. If an\n image is smaller than the target size, it will be resized and cropped\n so as to return the largest possible window in the image that matches\n the target aspect ratio.\n\n Input pixel values can be of any range (e.g. 
`[0., 1.)` or `[0, 255]`).\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format,\n or `(..., channels, height, width)`, in `\"channels_first\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., target_height, target_width, channels)`,\n or `(..., channels, target_height, target_width)`,\n in `\"channels_first\"` format.\n\n If the input height/width is even and the target height/width is odd (or\n inversely), the input image is left-padded by 1 pixel.\n\n **Note:** This layer is safe to use inside a `tf.data` pipeline\n (independently of which backend you're using).\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.\n \"\"\"\n\n def __init__(self, height, width, data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.height = height\n self.width = width\n self.data_format = backend.standardize_data_format(data_format)\n\n def call(self, inputs):\n inputs = self.backend.cast(inputs, self.compute_dtype)\n if self.data_format == \"channels_first\":\n init_height = inputs.shape[-2]\n init_width = inputs.shape[-1]\n else:\n init_height = inputs.shape[-3]\n init_width = inputs.shape[-2]\n\n if init_height is None or init_width is None:\n # Dynamic size case. TODO.\n raise ValueError(\n \"At this time, CenterCrop can only \"\n \"process images with a static spatial \"\n f\"shape. Received: inputs.shape={inputs.shape}\"\n )\n\n h_diff = init_height - self.height\n w_diff = init_width - self.width\n\n h_start = int(h_diff / 2)\n w_start = int(w_diff / 2)\n\n if h_diff >= 0 and w_diff >= 0:\n if len(inputs.shape) == 4:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n elif len(inputs.shape) == 3:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n\n return image_utils.smart_resize(\n inputs,\n [self.height, self.width],\n data_format=self.data_format,\n backend_module=self.backend,\n )\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n if isinstance(input_shape[0], (list, tuple)) or len(\n input_shape\n ) not in (3, 4):\n raise ValueError(\n \"`input_shape` must be a non-nested tuple or list \"\n \"of rank-1 with size 3 (unbatched) or 4 (batched). 
\"\n )\n if len(input_shape) == 4:\n if self.data_format == \"channels_last\":\n input_shape[1] = self.height\n input_shape[2] = self.width\n else:\n input_shape[2] = self.height\n input_shape[3] = self.width\n else:\n if self.data_format == \"channels_last\":\n input_shape[0] = self.height\n input_shape[1] = self.width\n else:\n input_shape[1] = self.height\n input_shape[2] = self.width\n return tuple(input_shape)\n\n def get_config(self):\n base_config = super().get_config()\n config = {\n \"height\": self.height,\n \"width\": self.width,\n \"data_format\": self.data_format,\n }\n return {**base_config, **config}\n", "path": "keras/layers/preprocessing/center_crop.py"}]} | 1,851 | 201 |
gh_patches_debug_6913 | rasdani/github-patches | git_diff | horovod__horovod-214 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error on using hvd.mpi_threads_supported() error: undefined symbol: mpi_threads_supported
Error on using hvd.mpi_threads_supported()
error: undefined symbol: mpi_threads_supported
`mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()`
should be
`mpi_threads_supported = MPI_COMMON_LIB_CTYPES.horovod_mpi_threads_supported()`
--- END ISSUE ---
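For context on why the exact name matters: `ctypes.CDLL` resolves attribute lookups against the shared library's exported C symbols, so the Python-side name must match the C export (here `horovod_mpi_threads_supported`) exactly. A small illustration with an unrelated library (Linux library name assumed, nothing Horovod-specific):

```python
import ctypes

libm = ctypes.CDLL("libm.so.6")  # any shared library works for the demonstration
libm.cos                         # resolves fine: "cos" is an exported symbol
try:
    libm.cosine                  # no such export -> "undefined symbol: cosine"
except AttributeError as exc:
    print(exc)
```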
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `horovod/common/__init__.py`
Content:
```
1 # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
2 # Modifications copyright (C) 2018 Uber Technologies, Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 # =============================================================================
16
17 import ctypes
18 import os
19 import sysconfig
20
21
22 def get_ext_suffix():
23 """Determine library extension for various versions of Python."""
24 ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
25 if ext_suffix:
26 return ext_suffix
27
28 ext_suffix = sysconfig.get_config_var('SO')
29 if ext_suffix:
30 return ext_suffix
31
32 return '.so'
33
34
35 MPI_COMMON_LIB_CTYPES = \
36 ctypes.CDLL(os.path.join(os.path.dirname(__file__),
37 'mpi_lib' + get_ext_suffix()), mode=ctypes.RTLD_GLOBAL)
38
39
40 def init():
41 """A function that initializes Horovod.
42 """
43 return MPI_COMMON_LIB_CTYPES.horovod_init()
44
45
46 def size():
47 """A function that returns the number of Horovod processes.
48
49 Returns:
50 An integer scalar containing the number of Horovod processes.
51 """
52 size = MPI_COMMON_LIB_CTYPES.horovod_size()
53 if size == -1:
54 raise ValueError(
55 'Horovod has not been initialized; use hvd.init().')
56 return size
57
58
59 def local_size():
60 """A function that returns the number of Horovod processes within the
61 node the current process is running on.
62
63 Returns:
64 An integer scalar containing the number of local Horovod processes.
65 """
66 local_size = MPI_COMMON_LIB_CTYPES.horovod_local_size()
67 if local_size == -1:
68 raise ValueError(
69 'Horovod has not been initialized; use hvd.init().')
70 return local_size
71
72
73 def rank():
74 """A function that returns the Horovod rank of the calling process.
75
76 Returns:
77 An integer scalar with the Horovod rank of the calling process.
78 """
79 rank = MPI_COMMON_LIB_CTYPES.horovod_rank()
80 if rank == -1:
81 raise ValueError(
82 'Horovod has not been initialized; use hvd.init().')
83 return rank
84
85
86 def local_rank():
87 """A function that returns the local Horovod rank of the calling process, within the
88 node that it is running on. For example, if there are seven processes running
89 on a node, their local ranks will be zero through six, inclusive.
90
91 Returns:
92 An integer scalar with the local Horovod rank of the calling process.
93 """
94 local_rank = MPI_COMMON_LIB_CTYPES.horovod_local_rank()
95 if local_rank == -1:
96 raise ValueError(
97 'Horovod has not been initialized; use hvd.init().')
98 return local_rank
99
100
101 def mpi_threads_supported():
102 """A function that returns a flag indicating whether MPI multi-threading is supported.
103
104 If MPI multi-threading is supported, users may mix and match Horovod usage with other
105 MPI libraries, such as `mpi4py`.
106
107 Returns:
108 A boolean value indicating whether MPI multi-threading is supported.
109 """
110 mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()
111 if mpi_threads_supported == -1:
112 raise ValueError(
113 'Horovod has not been initialized; use hvd.init().')
114 return bool(mpi_threads_supported)
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/horovod/common/__init__.py b/horovod/common/__init__.py
--- a/horovod/common/__init__.py
+++ b/horovod/common/__init__.py
@@ -107,7 +107,7 @@
Returns:
A boolean value indicating whether MPI multi-threading is supported.
"""
- mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()
+ mpi_threads_supported = MPI_COMMON_LIB_CTYPES.horovod_mpi_threads_supported()
if mpi_threads_supported == -1:
raise ValueError(
'Horovod has not been initialized; use hvd.init().')
| {"golden_diff": "diff --git a/horovod/common/__init__.py b/horovod/common/__init__.py\n--- a/horovod/common/__init__.py\n+++ b/horovod/common/__init__.py\n@@ -107,7 +107,7 @@\n Returns:\n A boolean value indicating whether MPI multi-threading is supported.\n \"\"\"\n- mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()\n+ mpi_threads_supported = MPI_COMMON_LIB_CTYPES.horovod_mpi_threads_supported()\n if mpi_threads_supported == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n", "issue": "Error on using hvd.mpi_threads_supported() error: undefined symbol: mpi_threads_supported\nError on using hvd.mpi_threads_supported() \r\n\r\nerror: undefined symbol: mpi_threads_supported\r\n\r\n`mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()`\r\n should be \r\n`mpi_threads_supported = MPI_COMMON_LIB_CTYPES.horovod_mpi_threads_supported()`\r\n\n", "before_files": [{"content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Modifications copyright (C) 2018 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport ctypes\nimport os\nimport sysconfig\n\n\ndef get_ext_suffix():\n \"\"\"Determine library extension for various versions of Python.\"\"\"\n ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')\n if ext_suffix:\n return ext_suffix\n\n ext_suffix = sysconfig.get_config_var('SO')\n if ext_suffix:\n return ext_suffix\n\n return '.so'\n\n\nMPI_COMMON_LIB_CTYPES = \\\n ctypes.CDLL(os.path.join(os.path.dirname(__file__),\n 'mpi_lib' + get_ext_suffix()), mode=ctypes.RTLD_GLOBAL)\n\n\ndef init():\n \"\"\"A function that initializes Horovod.\n \"\"\"\n return MPI_COMMON_LIB_CTYPES.horovod_init()\n\n\ndef size():\n \"\"\"A function that returns the number of Horovod processes.\n\n Returns:\n An integer scalar containing the number of Horovod processes.\n \"\"\"\n size = MPI_COMMON_LIB_CTYPES.horovod_size()\n if size == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return size\n\n\ndef local_size():\n \"\"\"A function that returns the number of Horovod processes within the\n node the current process is running on.\n\n Returns:\n An integer scalar containing the number of local Horovod processes.\n \"\"\"\n local_size = MPI_COMMON_LIB_CTYPES.horovod_local_size()\n if local_size == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return local_size\n\n\ndef rank():\n \"\"\"A function that returns the Horovod rank of the calling process.\n\n Returns:\n An integer scalar with the Horovod rank of the calling process.\n \"\"\"\n rank = MPI_COMMON_LIB_CTYPES.horovod_rank()\n if rank == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return rank\n\n\ndef local_rank():\n \"\"\"A function that returns the local Horovod rank of the calling process, within the\n node that it is running on. 
For example, if there are seven processes running\n on a node, their local ranks will be zero through six, inclusive.\n\n Returns:\n An integer scalar with the local Horovod rank of the calling process.\n \"\"\"\n local_rank = MPI_COMMON_LIB_CTYPES.horovod_local_rank()\n if local_rank == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return local_rank\n\n\ndef mpi_threads_supported():\n \"\"\"A function that returns a flag indicating whether MPI multi-threading is supported.\n\n If MPI multi-threading is supported, users may mix and match Horovod usage with other\n MPI libraries, such as `mpi4py`.\n\n Returns:\n A boolean value indicating whether MPI multi-threading is supported.\n \"\"\"\n mpi_threads_supported = MPI_COMMON_LIB_CTYPES.mpi_threads_supported()\n if mpi_threads_supported == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return bool(mpi_threads_supported)\n", "path": "horovod/common/__init__.py"}], "after_files": [{"content": "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Modifications copyright (C) 2018 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\nimport ctypes\nimport os\nimport sysconfig\n\n\ndef get_ext_suffix():\n \"\"\"Determine library extension for various versions of Python.\"\"\"\n ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')\n if ext_suffix:\n return ext_suffix\n\n ext_suffix = sysconfig.get_config_var('SO')\n if ext_suffix:\n return ext_suffix\n\n return '.so'\n\n\nMPI_COMMON_LIB_CTYPES = \\\n ctypes.CDLL(os.path.join(os.path.dirname(__file__),\n 'mpi_lib' + get_ext_suffix()), mode=ctypes.RTLD_GLOBAL)\n\n\ndef init():\n \"\"\"A function that initializes Horovod.\n \"\"\"\n return MPI_COMMON_LIB_CTYPES.horovod_init()\n\n\ndef size():\n \"\"\"A function that returns the number of Horovod processes.\n\n Returns:\n An integer scalar containing the number of Horovod processes.\n \"\"\"\n size = MPI_COMMON_LIB_CTYPES.horovod_size()\n if size == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return size\n\n\ndef local_size():\n \"\"\"A function that returns the number of Horovod processes within the\n node the current process is running on.\n\n Returns:\n An integer scalar containing the number of local Horovod processes.\n \"\"\"\n local_size = MPI_COMMON_LIB_CTYPES.horovod_local_size()\n if local_size == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return local_size\n\n\ndef rank():\n \"\"\"A function that returns the Horovod rank of the calling process.\n\n Returns:\n An integer scalar with the Horovod rank of the calling process.\n \"\"\"\n rank = MPI_COMMON_LIB_CTYPES.horovod_rank()\n if rank == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return rank\n\n\ndef local_rank():\n \"\"\"A function that returns the local Horovod rank of the calling process, within 
the\n node that it is running on. For example, if there are seven processes running\n on a node, their local ranks will be zero through six, inclusive.\n\n Returns:\n An integer scalar with the local Horovod rank of the calling process.\n \"\"\"\n local_rank = MPI_COMMON_LIB_CTYPES.horovod_local_rank()\n if local_rank == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return local_rank\n\n\ndef mpi_threads_supported():\n \"\"\"A function that returns a flag indicating whether MPI multi-threading is supported.\n\n If MPI multi-threading is supported, users may mix and match Horovod usage with other\n MPI libraries, such as `mpi4py`.\n\n Returns:\n A boolean value indicating whether MPI multi-threading is supported.\n \"\"\"\n mpi_threads_supported = MPI_COMMON_LIB_CTYPES.horovod_mpi_threads_supported()\n if mpi_threads_supported == -1:\n raise ValueError(\n 'Horovod has not been initialized; use hvd.init().')\n return bool(mpi_threads_supported)\n", "path": "horovod/common/__init__.py"}]} | 1,414 | 144 |
gh_patches_debug_32181 | rasdani/github-patches | git_diff | tensorflow__addons-1281 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Segmentation fault when using tfa.rotate in tf-nightly
**System information**
- OS Platform: Linux Ubuntu 18.04.3 LTS
- TensorFlow version: tf-nightly (2.2.0-dev20200310) installed with pip
- TensorFlow-Addons version: 0.8.3 (pip) and tfa-nightly (0.9.0-dev; pip)
- Python version: 3.6.9
- Is GPU used?: yes
**Describe the bug**
I currently have to use tf-nightly because of a separate issue. When I use tfa.rotate I get a segmentation fault. Behavior is the same with tfa 0.8.3 and tfa-nightly.
**Code to reproduce the issue**
```
import tensorflow as tf
import tensorflow_addons as tfa
img = tf.io.read_file(IMG_PATH)
img = tf.image.decode_png(img)
print(img.shape)
img = tfa.image.rotate(img, 90)
```
Output:
> (128, 128, 3)
> Segmentation fault (core dumped)
I also tried `interpolation="BILINEAR"`. Problem is the same.
--- END ISSUE ---
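One workaround hinted at by the fix further down (and by the addons README it links) is to force the pure-Python op implementations so the mismatched compiled ops are never loaded. A sketch; the exact behaviour depends on the addons version:

```python
import os
os.environ["TF_ADDONS_PY_OPS"] = "1"  # must be set before tensorflow_addons is imported

import tensorflow as tf
import tensorflow_addons as tfa

img = tf.zeros([128, 128, 3])
rotated = tfa.image.rotate(img, 90.0)  # should dispatch to the Python path instead of the custom op
```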
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/utils/ensure_tf_install.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15
16
17 # Ensure TensorFlow is importable and its version is sufficiently recent. This
18 # needs to happen before anything else, since the imports below will try to
19 # import tensorflow, too.
20 def _ensure_tf_install():
21 """Attempt to import tensorflow, and ensure its version is sufficient.
22 Raises:
23 ImportError: if either tensorflow is not importable or its version is
24 inadequate.
25 """
26 import tensorflow as tf
27 import distutils.version
28
29 #
30 # Update this whenever we need to depend on a newer TensorFlow release.
31 #
32 required_tensorflow_version = "2.1.0"
33
34 if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(
35 required_tensorflow_version
36 ):
37 raise ImportError(
38 "This version of TensorFlow Addons requires TensorFlow "
39 "version >= {required}; Detected an installation of version "
40 "{present}. Please upgrade TensorFlow to proceed.".format(
41 required=required_tensorflow_version, present=tf.__version__
42 )
43 )
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensorflow_addons/utils/ensure_tf_install.py b/tensorflow_addons/utils/ensure_tf_install.py
--- a/tensorflow_addons/utils/ensure_tf_install.py
+++ b/tensorflow_addons/utils/ensure_tf_install.py
@@ -17,27 +17,40 @@
# Ensure TensorFlow is importable and its version is sufficiently recent. This
# needs to happen before anything else, since the imports below will try to
# import tensorflow, too.
+
+from distutils.version import LooseVersion
+import warnings
+
+import tensorflow as tf
+
+
+warning_template = """
+This version of TensorFlow Addons requires TensorFlow {required}.
+Detected an installation of version {present}.
+
+While some functions might work, TensorFlow Addons was not tested
+with this TensorFlow version. Also custom ops were not compiled
+against this version of TensorFlow. If you use custom ops,
+you might get errors (segmentation faults for example).
+
+It might help you to fallback to pure Python ops with
+TF_ADDONS_PY_OPS . To do that, see
+https://github.com/tensorflow/addons#gpucpu-custom-ops
+
+If you encounter errors, do *not* file bugs in GitHub because
+the version of TensorFlow you are using is not supported.
+"""
+
+
def _ensure_tf_install():
- """Attempt to import tensorflow, and ensure its version is sufficient.
- Raises:
- ImportError: if either tensorflow is not importable or its version is
- inadequate.
+ """Warn the user if the version of TensorFlow used is not supported.
"""
- import tensorflow as tf
- import distutils.version
- #
# Update this whenever we need to depend on a newer TensorFlow release.
- #
- required_tensorflow_version = "2.1.0"
-
- if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(
- required_tensorflow_version
- ):
- raise ImportError(
- "This version of TensorFlow Addons requires TensorFlow "
- "version >= {required}; Detected an installation of version "
- "{present}. Please upgrade TensorFlow to proceed.".format(
- required=required_tensorflow_version, present=tf.__version__
- )
+ required_tf_version = "2.1.0"
+
+ if LooseVersion(tf.__version__) != LooseVersion(required_tf_version):
+ message = warning_template.format(
+ required=required_tf_version, present=tf.__version__
)
+ warnings.warn(message, UserWarning)
| {"golden_diff": "diff --git a/tensorflow_addons/utils/ensure_tf_install.py b/tensorflow_addons/utils/ensure_tf_install.py\n--- a/tensorflow_addons/utils/ensure_tf_install.py\n+++ b/tensorflow_addons/utils/ensure_tf_install.py\n@@ -17,27 +17,40 @@\n # Ensure TensorFlow is importable and its version is sufficiently recent. This\n # needs to happen before anything else, since the imports below will try to\n # import tensorflow, too.\n+\n+from distutils.version import LooseVersion\n+import warnings\n+\n+import tensorflow as tf\n+\n+\n+warning_template = \"\"\"\n+This version of TensorFlow Addons requires TensorFlow {required}.\n+Detected an installation of version {present}.\n+\n+While some functions might work, TensorFlow Addons was not tested\n+with this TensorFlow version. Also custom ops were not compiled\n+against this version of TensorFlow. If you use custom ops,\n+you might get errors (segmentation faults for example).\n+\n+It might help you to fallback to pure Python ops with\n+TF_ADDONS_PY_OPS . To do that, see\n+https://github.com/tensorflow/addons#gpucpu-custom-ops\n+\n+If you encounter errors, do *not* file bugs in GitHub because\n+the version of TensorFlow you are using is not supported.\n+\"\"\"\n+\n+\n def _ensure_tf_install():\n- \"\"\"Attempt to import tensorflow, and ensure its version is sufficient.\n- Raises:\n- ImportError: if either tensorflow is not importable or its version is\n- inadequate.\n+ \"\"\"Warn the user if the version of TensorFlow used is not supported.\n \"\"\"\n- import tensorflow as tf\n- import distutils.version\n \n- #\n # Update this whenever we need to depend on a newer TensorFlow release.\n- #\n- required_tensorflow_version = \"2.1.0\"\n-\n- if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(\n- required_tensorflow_version\n- ):\n- raise ImportError(\n- \"This version of TensorFlow Addons requires TensorFlow \"\n- \"version >= {required}; Detected an installation of version \"\n- \"{present}. Please upgrade TensorFlow to proceed.\".format(\n- required=required_tensorflow_version, present=tf.__version__\n- )\n+ required_tf_version = \"2.1.0\"\n+\n+ if LooseVersion(tf.__version__) != LooseVersion(required_tf_version):\n+ message = warning_template.format(\n+ required=required_tf_version, present=tf.__version__\n )\n+ warnings.warn(message, UserWarning)\n", "issue": "Segmentation fault when using tfa.rotate in tf-nightly\n**System information**\r\n- OS Platform: Linux Ubuntu 18.04.3 LTS\r\n- TensorFlow version: tf-nightly (2.2.0-dev20200310) installed with pip\r\n- TensorFlow-Addons version: 0.8.3 (pip) and tfa-nightly (0.9.0-dev; pip)\r\n- Python version: 3.6.9\r\n- Is GPU used?: yes\r\n\r\n**Describe the bug**\r\nI currently have to use tf-nightly because of a separate issue. When I use tfa.rotate I get a segmentation fault. Behavior is the same with tfa 0.8.3 and tfa-nightly.\r\n\r\n**Code to reproduce the issue**\r\n```\r\nimport tensorflow as tf\r\nimport tensorflow_addons as tfa\r\n\r\nimg = tf.io.read_file(IMG_PATH)\r\nimg = tf.image.decode_png(img)\r\nprint(img.shape)\r\nimg = tfa.image.rotate(img, 90)\r\n```\r\nOutput: \r\n> (128, 128, 3)\r\n> Segmentation fault (core dumped)\r\n\r\nI also tried `interpolation=\"BILINEAR\"`. Problem is the same. \n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n# Ensure TensorFlow is importable and its version is sufficiently recent. This\n# needs to happen before anything else, since the imports below will try to\n# import tensorflow, too.\ndef _ensure_tf_install():\n \"\"\"Attempt to import tensorflow, and ensure its version is sufficient.\n Raises:\n ImportError: if either tensorflow is not importable or its version is\n inadequate.\n \"\"\"\n import tensorflow as tf\n import distutils.version\n\n #\n # Update this whenever we need to depend on a newer TensorFlow release.\n #\n required_tensorflow_version = \"2.1.0\"\n\n if distutils.version.LooseVersion(tf.__version__) < distutils.version.LooseVersion(\n required_tensorflow_version\n ):\n raise ImportError(\n \"This version of TensorFlow Addons requires TensorFlow \"\n \"version >= {required}; Detected an installation of version \"\n \"{present}. Please upgrade TensorFlow to proceed.\".format(\n required=required_tensorflow_version, present=tf.__version__\n )\n )\n", "path": "tensorflow_addons/utils/ensure_tf_install.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\n# Ensure TensorFlow is importable and its version is sufficiently recent. This\n# needs to happen before anything else, since the imports below will try to\n# import tensorflow, too.\n\nfrom distutils.version import LooseVersion\nimport warnings\n\nimport tensorflow as tf\n\n\nwarning_template = \"\"\"\nThis version of TensorFlow Addons requires TensorFlow {required}.\nDetected an installation of version {present}.\n\nWhile some functions might work, TensorFlow Addons was not tested\nwith this TensorFlow version. Also custom ops were not compiled\nagainst this version of TensorFlow. If you use custom ops,\nyou might get errors (segmentation faults for example).\n\nIt might help you to fallback to pure Python ops with\nTF_ADDONS_PY_OPS . 
To do that, see\nhttps://github.com/tensorflow/addons#gpucpu-custom-ops\n\nIf you encounter errors, do *not* file bugs in GitHub because\nthe version of TensorFlow you are using is not supported.\n\"\"\"\n\n\ndef _ensure_tf_install():\n \"\"\"Warn the user if the version of TensorFlow used is not supported.\n \"\"\"\n\n # Update this whenever we need to depend on a newer TensorFlow release.\n required_tf_version = \"2.1.0\"\n\n if LooseVersion(tf.__version__) != LooseVersion(required_tf_version):\n message = warning_template.format(\n required=required_tf_version, present=tf.__version__\n )\n warnings.warn(message, UserWarning)\n", "path": "tensorflow_addons/utils/ensure_tf_install.py"}]} | 953 | 556 |
gh_patches_debug_11369 | rasdani/github-patches | git_diff | apluslms__a-plus-1348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use setUpTestData in tests
`setUp` is ran before every test method but `setUpTestData` is run only once for the whole test class. Each test method is run in a transaction and rolled back at the end, so there is no need to re-create the database data for each method separately. Most of the time in unit tests seems to be spent on creating data for the tests, so it would be a massive speed up to reuse the data by using `setUpTestData`.
`setUpTestData` takes the class as an argument but it can be used similarly to `setUp`: just assign the properties to the class (e.g. `cls.instance = CourseInstance...`) and the property can be accessed normally through the object (e.g. `self.instance`).
--- END ISSUE ---
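For illustration, a minimal sketch of the `setUpTestData` pattern described in the issue. The `Course` model, its fields, and the import path are taken from the test-data file shown below; the test method itself is hypothetical:
```python
from django.test import TestCase

from course.models import Course  # import path as used in lib/testdata.py


class CourseTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Runs once for the whole class; each test method still runs in its
        # own transaction that is rolled back afterwards.
        cls.course = Course.objects.create(url="course", name="Test Course", code="123456")

    def test_course_name(self):
        # Class-level data is reachable through the instance as usual.
        self.assertEqual(self.course.name, "Test Course")
```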
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/testdata.py`
Content:
```
1 from datetime import timedelta
2 from django.conf import settings
3 from django.contrib.auth.models import User
4 from django.test import TestCase
5 from django.utils import timezone
6
7 from course.models import (
8 Course,
9 CourseInstance,
10 CourseModule,
11 LearningObjectCategory,
12 )
13 from exercise.models import (
14 BaseExercise,
15 StaticExercise,
16 Submission,
17 )
18
19
20 class CourseTestCase(TestCase):
21
22 def setUp(self):
23 self.setUpCourse()
24 self.setUpSubmissions()
25
26 def setUpCourse(self):
27
28 self.now = timezone.now()
29 self.tomorrow = self.now + timedelta(days=1)
30 self.two_days_after = self.now + timedelta(days=2)
31 self.three_days_after = self.now + timedelta(days=3)
32 self.yesterday = self.now - timedelta(days=1)
33 self.two_days_before = self.now - timedelta(days=2)
34
35 self.user = User(username='testUser')
36 self.user.set_password('testPassword')
37 self.user.save()
38
39 self.teacher = User(username='testTeacher')
40 self.teacher.set_password('testPassword')
41 self.teacher.save()
42
43 self.student = User(username='testStudent')
44 self.student.set_password('testPassword')
45 self.student.save()
46 self.student.userprofile.student_id = "123TEST"
47 self.student.userprofile.organization = settings.LOCAL_ORGANIZATION
48 self.student.userprofile.save()
49
50 self.course = Course.objects.create(
51 url="course",
52 name="Test Course",
53 code="123456",
54 )
55
56 self.instance = CourseInstance.objects.create(
57 course=self.course,
58 url="instance",
59 instance_name="2016",
60 starting_time=self.now,
61 ending_time=self.tomorrow,
62 )
63 self.instance.add_teacher(self.teacher.userprofile)
64 self.instance.enroll_student(self.student)
65
66 self.module = CourseModule.objects.create(
67 course_instance=self.instance,
68 url="module",
69 name="Test Module",
70 points_to_pass=10,
71 opening_time=self.now,
72 closing_time=self.tomorrow,
73 late_submissions_allowed=True,
74 late_submission_deadline=self.two_days_after,
75 late_submission_penalty=0.2
76 )
77 self.module2 = CourseModule.objects.create(
78 course_instance=self.instance,
79 url="module2",
80 name="Test Module 2",
81 points_to_pass=0,
82 opening_time=self.tomorrow,
83 closing_time=self.two_days_after,
84 )
85 self.module0 = CourseModule.objects.create(
86 course_instance=self.instance,
87 url="module0",
88 name="Past Module",
89 points_to_pass=10,
90 opening_time=self.two_days_before,
91 closing_time=self.yesterday,
92 )
93 self.category = LearningObjectCategory.objects.create(
94 course_instance=self.instance,
95 name="Test Category",
96 points_to_pass=5,
97 )
98
99 self.exercise = StaticExercise.objects.create(
100 course_module=self.module,
101 category=self.category,
102 url='e1',
103 name="Test Exercise",
104 exercise_page_content='$$exercise$$content',
105 submission_page_content='$$exercise$$received',
106 points_to_pass=0,
107 max_points=100,
108 order=1,
109 )
110 self.exercise2 = StaticExercise.objects.create(
111 course_module=self.module,
112 category=self.category,
113 url='e2',
114 name="Test Exercise 2",
115 exercise_page_content='$$exercise2$$content',
116 submission_page_content='$$exercise2$$received',
117 points_to_pass=10,
118 max_points=100,
119 order=2,
120 )
121 self.exercise3 = StaticExercise.objects.create(
122 course_module=self.module2,
123 category=self.category,
124 url='e3',
125 name="Test Exercise 3",
126 exercise_page_content='$$exercise3$$content',
127 submission_page_content='$$exercise3$$received',
128 points_to_pass=0,
129 max_points=100,
130 )
131 self.exercise0 = BaseExercise.objects.create(
132 course_module=self.module0,
133 category=self.category,
134 url='b0',
135 name="Base Exercise 0",
136 service_url="http://localhost/",
137 points_to_pass=0,
138 max_points=100,
139 min_group_size=1,
140 max_group_size=2,
141 )
142
143 def setUpSubmissions(self):
144
145 self.submission = Submission.objects.create(
146 exercise=self.exercise,
147 submission_data={'submission':1},
148 feedback='$$submission$$feedback',
149 )
150 self.submission.submitters.add(self.student.userprofile)
151 self.submission.set_points(1,2)
152 self.submission.set_ready()
153 self.submission.save()
154
155 self.submission2 = Submission.objects.create(
156 exercise=self.exercise,
157 submission_data={'submission':2},
158 )
159 self.submission2.submitters.add(self.student.userprofile)
160
161 self.submission3 = Submission.objects.create(
162 exercise=self.exercise2,
163 submission_data={'submission':3},
164 )
165 self.submission3.submitters.add(self.student.userprofile)
166 self.submission3.submitters.add(self.user.userprofile)
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/testdata.py b/lib/testdata.py
--- a/lib/testdata.py
+++ b/lib/testdata.py
@@ -19,10 +19,12 @@
class CourseTestCase(TestCase):
- def setUp(self):
- self.setUpCourse()
- self.setUpSubmissions()
+ @classmethod
+ def setUpTestData(cls):
+ cls.setUpCourse()
+ cls.setUpSubmissions()
+ @classmethod
def setUpCourse(self):
self.now = timezone.now()
@@ -140,6 +142,7 @@
max_group_size=2,
)
+ @classmethod
def setUpSubmissions(self):
self.submission = Submission.objects.create(
| {"golden_diff": "diff --git a/lib/testdata.py b/lib/testdata.py\n--- a/lib/testdata.py\n+++ b/lib/testdata.py\n@@ -19,10 +19,12 @@\n \n class CourseTestCase(TestCase):\n \n- def setUp(self):\n- self.setUpCourse()\n- self.setUpSubmissions()\n+ @classmethod\n+ def setUpTestData(cls):\n+ cls.setUpCourse()\n+ cls.setUpSubmissions()\n \n+ @classmethod\n def setUpCourse(self):\n \n self.now = timezone.now()\n@@ -140,6 +142,7 @@\n max_group_size=2,\n )\n \n+ @classmethod\n def setUpSubmissions(self):\n \n self.submission = Submission.objects.create(\n", "issue": "Use setUpTestData in tests \n`setUp` is ran before every test method but `setUpTestData` is run only once for the whole test class. Each test method is run in a transaction and rolled back at the end, so there is no need to re-create the database data for each method separately. Most of the time in unit tests seems to be spent on creating data for the tests, so it would be a massive speed up to reuse the data by using `setUpTestData`.\r\n\r\n`setUpTestData` takes the class as an argument but it can be used similarly to `setUp`: just assign the properties to the class (e.g. `cls.instance = CourseInstance...`) and the property can be accessed normally through the object (e.g. `self.instance`).\n", "before_files": [{"content": "from datetime import timedelta\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom course.models import (\n Course,\n CourseInstance,\n CourseModule,\n LearningObjectCategory,\n)\nfrom exercise.models import (\n BaseExercise,\n StaticExercise,\n Submission,\n)\n\n\nclass CourseTestCase(TestCase):\n\n def setUp(self):\n self.setUpCourse()\n self.setUpSubmissions()\n\n def setUpCourse(self):\n\n self.now = timezone.now()\n self.tomorrow = self.now + timedelta(days=1)\n self.two_days_after = self.now + timedelta(days=2)\n self.three_days_after = self.now + timedelta(days=3)\n self.yesterday = self.now - timedelta(days=1)\n self.two_days_before = self.now - timedelta(days=2)\n\n self.user = User(username='testUser')\n self.user.set_password('testPassword')\n self.user.save()\n\n self.teacher = User(username='testTeacher')\n self.teacher.set_password('testPassword')\n self.teacher.save()\n\n self.student = User(username='testStudent')\n self.student.set_password('testPassword')\n self.student.save()\n self.student.userprofile.student_id = \"123TEST\"\n self.student.userprofile.organization = settings.LOCAL_ORGANIZATION\n self.student.userprofile.save()\n\n self.course = Course.objects.create(\n url=\"course\",\n name=\"Test Course\",\n code=\"123456\",\n )\n\n self.instance = CourseInstance.objects.create(\n course=self.course,\n url=\"instance\",\n instance_name=\"2016\",\n starting_time=self.now,\n ending_time=self.tomorrow,\n )\n self.instance.add_teacher(self.teacher.userprofile)\n self.instance.enroll_student(self.student)\n\n self.module = CourseModule.objects.create(\n course_instance=self.instance,\n url=\"module\",\n name=\"Test Module\",\n points_to_pass=10,\n opening_time=self.now,\n closing_time=self.tomorrow,\n late_submissions_allowed=True,\n late_submission_deadline=self.two_days_after,\n late_submission_penalty=0.2\n )\n self.module2 = CourseModule.objects.create(\n course_instance=self.instance,\n url=\"module2\",\n name=\"Test Module 2\",\n points_to_pass=0,\n opening_time=self.tomorrow,\n closing_time=self.two_days_after,\n )\n self.module0 = CourseModule.objects.create(\n 
course_instance=self.instance,\n url=\"module0\",\n name=\"Past Module\",\n points_to_pass=10,\n opening_time=self.two_days_before,\n closing_time=self.yesterday,\n )\n self.category = LearningObjectCategory.objects.create(\n course_instance=self.instance,\n name=\"Test Category\",\n points_to_pass=5,\n )\n\n self.exercise = StaticExercise.objects.create(\n course_module=self.module,\n category=self.category,\n url='e1',\n name=\"Test Exercise\",\n exercise_page_content='$$exercise$$content',\n submission_page_content='$$exercise$$received',\n points_to_pass=0,\n max_points=100,\n order=1,\n )\n self.exercise2 = StaticExercise.objects.create(\n course_module=self.module,\n category=self.category,\n url='e2',\n name=\"Test Exercise 2\",\n exercise_page_content='$$exercise2$$content',\n submission_page_content='$$exercise2$$received',\n points_to_pass=10,\n max_points=100,\n order=2,\n )\n self.exercise3 = StaticExercise.objects.create(\n course_module=self.module2,\n category=self.category,\n url='e3',\n name=\"Test Exercise 3\",\n exercise_page_content='$$exercise3$$content',\n submission_page_content='$$exercise3$$received',\n points_to_pass=0,\n max_points=100,\n )\n self.exercise0 = BaseExercise.objects.create(\n course_module=self.module0,\n category=self.category,\n url='b0',\n name=\"Base Exercise 0\",\n service_url=\"http://localhost/\",\n points_to_pass=0,\n max_points=100,\n min_group_size=1,\n max_group_size=2,\n )\n\n def setUpSubmissions(self):\n\n self.submission = Submission.objects.create(\n exercise=self.exercise,\n submission_data={'submission':1},\n feedback='$$submission$$feedback',\n )\n self.submission.submitters.add(self.student.userprofile)\n self.submission.set_points(1,2)\n self.submission.set_ready()\n self.submission.save()\n\n self.submission2 = Submission.objects.create(\n exercise=self.exercise,\n submission_data={'submission':2},\n )\n self.submission2.submitters.add(self.student.userprofile)\n\n self.submission3 = Submission.objects.create(\n exercise=self.exercise2,\n submission_data={'submission':3},\n )\n self.submission3.submitters.add(self.student.userprofile)\n self.submission3.submitters.add(self.user.userprofile)\n", "path": "lib/testdata.py"}], "after_files": [{"content": "from datetime import timedelta\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.test import TestCase\nfrom django.utils import timezone\n\nfrom course.models import (\n Course,\n CourseInstance,\n CourseModule,\n LearningObjectCategory,\n)\nfrom exercise.models import (\n BaseExercise,\n StaticExercise,\n Submission,\n)\n\n\nclass CourseTestCase(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.setUpCourse()\n cls.setUpSubmissions()\n\n @classmethod\n def setUpCourse(self):\n\n self.now = timezone.now()\n self.tomorrow = self.now + timedelta(days=1)\n self.two_days_after = self.now + timedelta(days=2)\n self.three_days_after = self.now + timedelta(days=3)\n self.yesterday = self.now - timedelta(days=1)\n self.two_days_before = self.now - timedelta(days=2)\n\n self.user = User(username='testUser')\n self.user.set_password('testPassword')\n self.user.save()\n\n self.teacher = User(username='testTeacher')\n self.teacher.set_password('testPassword')\n self.teacher.save()\n\n self.student = User(username='testStudent')\n self.student.set_password('testPassword')\n self.student.save()\n self.student.userprofile.student_id = \"123TEST\"\n self.student.userprofile.organization = settings.LOCAL_ORGANIZATION\n 
self.student.userprofile.save()\n\n self.course = Course.objects.create(\n url=\"course\",\n name=\"Test Course\",\n code=\"123456\",\n )\n\n self.instance = CourseInstance.objects.create(\n course=self.course,\n url=\"instance\",\n instance_name=\"2016\",\n starting_time=self.now,\n ending_time=self.tomorrow,\n )\n self.instance.add_teacher(self.teacher.userprofile)\n self.instance.enroll_student(self.student)\n\n self.module = CourseModule.objects.create(\n course_instance=self.instance,\n url=\"module\",\n name=\"Test Module\",\n points_to_pass=10,\n opening_time=self.now,\n closing_time=self.tomorrow,\n late_submissions_allowed=True,\n late_submission_deadline=self.two_days_after,\n late_submission_penalty=0.2\n )\n self.module2 = CourseModule.objects.create(\n course_instance=self.instance,\n url=\"module2\",\n name=\"Test Module 2\",\n points_to_pass=0,\n opening_time=self.tomorrow,\n closing_time=self.two_days_after,\n )\n self.module0 = CourseModule.objects.create(\n course_instance=self.instance,\n url=\"module0\",\n name=\"Past Module\",\n points_to_pass=10,\n opening_time=self.two_days_before,\n closing_time=self.yesterday,\n )\n self.category = LearningObjectCategory.objects.create(\n course_instance=self.instance,\n name=\"Test Category\",\n points_to_pass=5,\n )\n\n self.exercise = StaticExercise.objects.create(\n course_module=self.module,\n category=self.category,\n url='e1',\n name=\"Test Exercise\",\n exercise_page_content='$$exercise$$content',\n submission_page_content='$$exercise$$received',\n points_to_pass=0,\n max_points=100,\n order=1,\n )\n self.exercise2 = StaticExercise.objects.create(\n course_module=self.module,\n category=self.category,\n url='e2',\n name=\"Test Exercise 2\",\n exercise_page_content='$$exercise2$$content',\n submission_page_content='$$exercise2$$received',\n points_to_pass=10,\n max_points=100,\n order=2,\n )\n self.exercise3 = StaticExercise.objects.create(\n course_module=self.module2,\n category=self.category,\n url='e3',\n name=\"Test Exercise 3\",\n exercise_page_content='$$exercise3$$content',\n submission_page_content='$$exercise3$$received',\n points_to_pass=0,\n max_points=100,\n )\n self.exercise0 = BaseExercise.objects.create(\n course_module=self.module0,\n category=self.category,\n url='b0',\n name=\"Base Exercise 0\",\n service_url=\"http://localhost/\",\n points_to_pass=0,\n max_points=100,\n min_group_size=1,\n max_group_size=2,\n )\n\n @classmethod\n def setUpSubmissions(self):\n\n self.submission = Submission.objects.create(\n exercise=self.exercise,\n submission_data={'submission':1},\n feedback='$$submission$$feedback',\n )\n self.submission.submitters.add(self.student.userprofile)\n self.submission.set_points(1,2)\n self.submission.set_ready()\n self.submission.save()\n\n self.submission2 = Submission.objects.create(\n exercise=self.exercise,\n submission_data={'submission':2},\n )\n self.submission2.submitters.add(self.student.userprofile)\n\n self.submission3 = Submission.objects.create(\n exercise=self.exercise2,\n submission_data={'submission':3},\n )\n self.submission3.submitters.add(self.student.userprofile)\n self.submission3.submitters.add(self.user.userprofile)\n", "path": "lib/testdata.py"}]} | 1,886 | 159 |
gh_patches_debug_1204 | rasdani/github-patches | git_diff | microsoft__playwright-python-1497 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Question]: How to get the right BrowserType from a device name?
### Your question
I noticed that the CLI is able to figure out the right `BrowserType` to use when it is launched from the command line:
```
playwright open --device="Desktop Safari" wikipedia.org # Webkit
playwright open --device="Desktop Firefox" wikipedia.org # Firefox
playwright open --device="Desktop Chrome" wikipedia.org # Chrome
```
But [the documentation](https://playwright.dev/python/docs/api/class-playwright#playwright-devices) seems to say that I have to initialize a `BrowserType` before I can pass the settings to the context, which partially defeats the purpose of the device settings.
I can implement my own logic to initialize the right `BrowserType` for each device, but as `playwright open` can already do that, it seems superfluous.
--- END ISSUE ---
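For illustration, a hedged sketch of how the device descriptor could drive the choice of `BrowserType`, assuming the descriptor exposes a `default_browser_type` key (which is what the patch below adds); the device name and URL are just examples:
```python
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    device = dict(p.devices["Desktop Safari"])
    # Assumed key: only present once parse_device_descriptor forwards defaultBrowserType.
    browser_name = device.pop("default_browser_type", "chromium")  # e.g. "webkit"
    browser = getattr(p, browser_name).launch()  # p.chromium / p.firefox / p.webkit
    context = browser.new_context(**device)
    page = context.new_page()
    page.goto("https://wikipedia.org")
    browser.close()
```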
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `playwright/_impl/_playwright.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Dict
16
17 from playwright._impl._browser_type import BrowserType
18 from playwright._impl._connection import ChannelOwner, from_channel
19 from playwright._impl._fetch import APIRequest
20 from playwright._impl._local_utils import LocalUtils
21 from playwright._impl._selectors import Selectors, SelectorsOwner
22
23
24 class Playwright(ChannelOwner):
25 devices: Dict
26 selectors: Selectors
27 chromium: BrowserType
28 firefox: BrowserType
29 webkit: BrowserType
30 request: APIRequest
31
32 def __init__(
33 self, parent: ChannelOwner, type: str, guid: str, initializer: Dict
34 ) -> None:
35 super().__init__(parent, type, guid, initializer)
36 self.request = APIRequest(self)
37 self.chromium = from_channel(initializer["chromium"])
38 self.chromium._playwright = self
39 self.firefox = from_channel(initializer["firefox"])
40 self.firefox._playwright = self
41 self.webkit = from_channel(initializer["webkit"])
42 self.webkit._playwright = self
43
44 self.selectors = Selectors(self._loop, self._dispatcher_fiber)
45 selectors_owner: SelectorsOwner = from_channel(initializer["selectors"])
46 self.selectors._add_channel(selectors_owner)
47
48 self._connection.on(
49 "close", lambda: self.selectors._remove_channel(selectors_owner)
50 )
51 self.devices = {}
52 self.devices = {
53 device["name"]: parse_device_descriptor(device["descriptor"])
54 for device in initializer["deviceDescriptors"]
55 }
56 self._utils: LocalUtils = from_channel(initializer["utils"])
57
58 def __getitem__(self, value: str) -> "BrowserType":
59 if value == "chromium":
60 return self.chromium
61 elif value == "firefox":
62 return self.firefox
63 elif value == "webkit":
64 return self.webkit
65 raise ValueError("Invalid browser " + value)
66
67 def _set_selectors(self, selectors: SelectorsOwner) -> None:
68 selectors_owner = from_channel(self._initializer["selectors"])
69 self.selectors._remove_channel(selectors_owner)
70 self.selectors = selectors
71 self.selectors._add_channel(selectors_owner)
72
73 def stop(self) -> None:
74 pass
75
76
77 def parse_device_descriptor(dict: Dict) -> Dict:
78 return {
79 "user_agent": dict["userAgent"],
80 "viewport": dict["viewport"],
81 "device_scale_factor": dict["deviceScaleFactor"],
82 "is_mobile": dict["isMobile"],
83 "has_touch": dict["hasTouch"],
84 }
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/playwright/_impl/_playwright.py b/playwright/_impl/_playwright.py
--- a/playwright/_impl/_playwright.py
+++ b/playwright/_impl/_playwright.py
@@ -81,4 +81,5 @@
"device_scale_factor": dict["deviceScaleFactor"],
"is_mobile": dict["isMobile"],
"has_touch": dict["hasTouch"],
+ "default_browser_type": dict["defaultBrowserType"],
}
| {"golden_diff": "diff --git a/playwright/_impl/_playwright.py b/playwright/_impl/_playwright.py\n--- a/playwright/_impl/_playwright.py\n+++ b/playwright/_impl/_playwright.py\n@@ -81,4 +81,5 @@\n \"device_scale_factor\": dict[\"deviceScaleFactor\"],\n \"is_mobile\": dict[\"isMobile\"],\n \"has_touch\": dict[\"hasTouch\"],\n+ \"default_browser_type\": dict[\"defaultBrowserType\"],\n }\n", "issue": "[Question]: How to get the right BrowserType from a device name? \n### Your question\n\nI noticed that the CLI is able to figure out the right `BrowserType` to use when it is launched from the command line:\r\n\r\n```\r\nplaywright open --device=\"Desktop Safari\" wikipedia.org # Webkit\r\nplaywright open --device=\"Desktop Firefox\" wikipedia.org # Firefox\r\nplaywright open --device=\"Desktop Chrome\" wikipedia.org # Chrome\r\n``` \r\n\r\nBut [the documentation](https://playwright.dev/python/docs/api/class-playwright#playwright-devices) seems to say that I have to initialize a `BrowserType` before I can pass the settings to the context, which partially defeats the purpose of the device settings.\r\n\r\nI can implement my own logic do initialize the right `BrowserType` for each device, but as `playwright open` can already do that, that seems superfluous.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\n\nfrom playwright._impl._browser_type import BrowserType\nfrom playwright._impl._connection import ChannelOwner, from_channel\nfrom playwright._impl._fetch import APIRequest\nfrom playwright._impl._local_utils import LocalUtils\nfrom playwright._impl._selectors import Selectors, SelectorsOwner\n\n\nclass Playwright(ChannelOwner):\n devices: Dict\n selectors: Selectors\n chromium: BrowserType\n firefox: BrowserType\n webkit: BrowserType\n request: APIRequest\n\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self.request = APIRequest(self)\n self.chromium = from_channel(initializer[\"chromium\"])\n self.chromium._playwright = self\n self.firefox = from_channel(initializer[\"firefox\"])\n self.firefox._playwright = self\n self.webkit = from_channel(initializer[\"webkit\"])\n self.webkit._playwright = self\n\n self.selectors = Selectors(self._loop, self._dispatcher_fiber)\n selectors_owner: SelectorsOwner = from_channel(initializer[\"selectors\"])\n self.selectors._add_channel(selectors_owner)\n\n self._connection.on(\n \"close\", lambda: self.selectors._remove_channel(selectors_owner)\n )\n self.devices = {}\n self.devices = {\n device[\"name\"]: parse_device_descriptor(device[\"descriptor\"])\n for device in initializer[\"deviceDescriptors\"]\n }\n self._utils: LocalUtils = from_channel(initializer[\"utils\"])\n\n def __getitem__(self, value: str) -> \"BrowserType\":\n if value == \"chromium\":\n return self.chromium\n elif value == \"firefox\":\n return self.firefox\n elif value == \"webkit\":\n 
return self.webkit\n raise ValueError(\"Invalid browser \" + value)\n\n def _set_selectors(self, selectors: SelectorsOwner) -> None:\n selectors_owner = from_channel(self._initializer[\"selectors\"])\n self.selectors._remove_channel(selectors_owner)\n self.selectors = selectors\n self.selectors._add_channel(selectors_owner)\n\n def stop(self) -> None:\n pass\n\n\ndef parse_device_descriptor(dict: Dict) -> Dict:\n return {\n \"user_agent\": dict[\"userAgent\"],\n \"viewport\": dict[\"viewport\"],\n \"device_scale_factor\": dict[\"deviceScaleFactor\"],\n \"is_mobile\": dict[\"isMobile\"],\n \"has_touch\": dict[\"hasTouch\"],\n }\n", "path": "playwright/_impl/_playwright.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\n\nfrom playwright._impl._browser_type import BrowserType\nfrom playwright._impl._connection import ChannelOwner, from_channel\nfrom playwright._impl._fetch import APIRequest\nfrom playwright._impl._local_utils import LocalUtils\nfrom playwright._impl._selectors import Selectors, SelectorsOwner\n\n\nclass Playwright(ChannelOwner):\n devices: Dict\n selectors: Selectors\n chromium: BrowserType\n firefox: BrowserType\n webkit: BrowserType\n request: APIRequest\n\n def __init__(\n self, parent: ChannelOwner, type: str, guid: str, initializer: Dict\n ) -> None:\n super().__init__(parent, type, guid, initializer)\n self.request = APIRequest(self)\n self.chromium = from_channel(initializer[\"chromium\"])\n self.chromium._playwright = self\n self.firefox = from_channel(initializer[\"firefox\"])\n self.firefox._playwright = self\n self.webkit = from_channel(initializer[\"webkit\"])\n self.webkit._playwright = self\n\n self.selectors = Selectors(self._loop, self._dispatcher_fiber)\n selectors_owner: SelectorsOwner = from_channel(initializer[\"selectors\"])\n self.selectors._add_channel(selectors_owner)\n\n self._connection.on(\n \"close\", lambda: self.selectors._remove_channel(selectors_owner)\n )\n self.devices = {}\n self.devices = {\n device[\"name\"]: parse_device_descriptor(device[\"descriptor\"])\n for device in initializer[\"deviceDescriptors\"]\n }\n self._utils: LocalUtils = from_channel(initializer[\"utils\"])\n\n def __getitem__(self, value: str) -> \"BrowserType\":\n if value == \"chromium\":\n return self.chromium\n elif value == \"firefox\":\n return self.firefox\n elif value == \"webkit\":\n return self.webkit\n raise ValueError(\"Invalid browser \" + value)\n\n def _set_selectors(self, selectors: SelectorsOwner) -> None:\n selectors_owner = from_channel(self._initializer[\"selectors\"])\n self.selectors._remove_channel(selectors_owner)\n self.selectors = selectors\n self.selectors._add_channel(selectors_owner)\n\n def stop(self) -> None:\n pass\n\n\ndef parse_device_descriptor(dict: Dict) -> Dict:\n return {\n \"user_agent\": dict[\"userAgent\"],\n \"viewport\": dict[\"viewport\"],\n \"device_scale_factor\": dict[\"deviceScaleFactor\"],\n \"is_mobile\": 
dict[\"isMobile\"],\n \"has_touch\": dict[\"hasTouch\"],\n \"default_browser_type\": dict[\"defaultBrowserType\"],\n }\n", "path": "playwright/_impl/_playwright.py"}]} | 1,287 | 103 |
gh_patches_debug_11614 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enforce new SECRET_KEY in production environments
The server should refuse to start if the SECRET_KEY does not differ from the default value in our repository.
The importance of this issue depends on whether the Federated Cloud feature is using this key or not.
--- END ISSUE ---
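For illustration, a minimal sketch of the requested startup check, hooked into the existing `CmsConfig.ready()`; the default key shown here is a placeholder, not the repository's real value:
```python
import logging
import sys

from django.apps import AppConfig
from django.conf import settings

logger = logging.getLogger(__name__)

DEFAULT_SECRET_KEY = "change-me"  # placeholder for the key committed to the repository


class CmsConfig(AppConfig):
    name = "cms"

    def ready(self):
        # Refuse to start in production while the shipped default key is still in use.
        if settings.SECRET_KEY == DEFAULT_SECRET_KEY and not settings.DEBUG:
            logger.error("Refusing to start: change SECRET_KEY before running in production.")
            sys.exit(1)
```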
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/cms/apps.py`
Content:
```
1 """
2 Django related class representing a config of an app
3 """
4 from django.apps import AppConfig
5
6
7 class CmsConfig(AppConfig):
8 """
9 Class inheriting the django AppConfig
10 """
11
12 name = 'cms'
13
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/backend/cms/apps.py b/backend/cms/apps.py
--- a/backend/cms/apps.py
+++ b/backend/cms/apps.py
@@ -1,8 +1,12 @@
"""
Django related class representing a config of an app
"""
+import logging
+import sys
+from django.conf import settings
from django.apps import AppConfig
+logger = logging.getLogger(__name__)
class CmsConfig(AppConfig):
"""
@@ -10,3 +14,8 @@
"""
name = 'cms'
+
+ def ready(self):
+ if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:
+ logger.error("You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!")
+ sys.exit(1)
| {"golden_diff": "diff --git a/backend/cms/apps.py b/backend/cms/apps.py\n--- a/backend/cms/apps.py\n+++ b/backend/cms/apps.py\n@@ -1,8 +1,12 @@\n \"\"\"\n Django related class representing a config of an app\n \"\"\"\n+import logging\n+import sys\n+from django.conf import settings\n from django.apps import AppConfig\n \n+logger = logging.getLogger(__name__)\n \n class CmsConfig(AppConfig):\n \"\"\"\n@@ -10,3 +14,8 @@\n \"\"\"\n \n name = 'cms'\n+\n+ def ready(self):\n+ if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:\n+ logger.error(\"You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!\")\n+ sys.exit(1)\n", "issue": "Enforce new SECRET_KEY in production environments\nThe server should refuse to start if the SECRET_KEY does not differ from the default value in our repository.\r\n\r\nThe importance of this issue depends on whether the Federated Cloud feature is using this key or not.\n", "before_files": [{"content": "\"\"\"\nDjango related class representing a config of an app\n\"\"\"\nfrom django.apps import AppConfig\n\n\nclass CmsConfig(AppConfig):\n \"\"\"\n Class inheriting the django AppConfig\n \"\"\"\n\n name = 'cms'\n", "path": "backend/cms/apps.py"}], "after_files": [{"content": "\"\"\"\nDjango related class representing a config of an app\n\"\"\"\nimport logging\nimport sys\nfrom django.conf import settings\nfrom django.apps import AppConfig\n\nlogger = logging.getLogger(__name__)\n\nclass CmsConfig(AppConfig):\n \"\"\"\n Class inheriting the django AppConfig\n \"\"\"\n\n name = 'cms'\n\n def ready(self):\n if settings.SECRET_KEY == '-!v282$zj815_q@htaxcubylo)(l%a+k*-xi78hw*#s2@i86@_' and not settings.DEBUG:\n logger.error(\"You are running the Integreat CMS in production mode. Change the SECRET_KEY in the settings.py!\")\n sys.exit(1)\n", "path": "backend/cms/apps.py"}]} | 371 | 204 |
gh_patches_debug_1858 | rasdani/github-patches | git_diff | huggingface__accelerate-127 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error when loading optimizer state
Thanks for this awesome product!!
When I try to load optimizer state on TPUs, I get an error, since self.device_placement is never initialized in AcceleratedOptimizer..!
https://github.com/huggingface/accelerate/blob/e0a420f7cb32124cadeeae690b56e463f8fc598f/src/accelerate/optimizer.py#L83-L86
--- END ISSUE ---
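For illustration, a short sketch of the failure path described above; the `and self.device_placement` branch is only evaluated on TPU, which is where the missing attribute surfaces:
```python
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(2, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)  # optimizer is now an AcceleratedOptimizer

state = optimizer.state_dict()
optimizer.load_state_dict(state)  # on TPU: AttributeError, no attribute 'device_placement'
```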
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/accelerate/optimizer.py`
Content:
```
1 # Copyright 2021 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import torch
16
17 from packaging import version
18
19 from .state import AcceleratorState, DistributedType, is_tpu_available
20 from .utils import honor_type
21
22
23 if is_tpu_available():
24 import torch_xla.core.xla_model as xm
25
26
27 def move_to_device(state, device):
28 if isinstance(state, (list, tuple)):
29 return honor_type(state, (move_to_device(t, device) for t in state))
30 elif isinstance(state, dict):
31 return type(state)({k: move_to_device(v, device) for k, v in state.items()})
32 elif isinstance(state, torch.Tensor):
33 return state.to(device)
34 return state
35
36
37 class AcceleratedOptimizer(torch.optim.Optimizer):
38 """
39 Internal wrapper around a torch optimizer.
40
41 Args:
42 optimizer (:obj:`torch.optim.optimizer.Optimizer`):
43 The optimizer to wrap.
44 device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):
45 Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of
46 :obj:`optimizer` on the right device.
47 scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):
48 The scaler to use in the step function if training with mixed precision.
49 """
50
51 def __init__(self, optimizer, device_placement=True, scaler=None):
52 self.optimizer = optimizer
53 self.scaler = scaler
54 self.state = AcceleratorState()
55
56 # Handle device placement
57 if device_placement:
58 state_dict = self.optimizer.state_dict()
59 if self.state.distributed_type == DistributedType.TPU:
60 xm.send_cpu_data_to_device(state_dict, self.state.device)
61 else:
62 state_dict = move_to_device(state_dict, self.state.device)
63 self.optimizer.load_state_dict(state_dict)
64
65 @property
66 def param_groups(self):
67 return self.optimizer.param_groups
68
69 @param_groups.setter
70 def param_groups(self, param_groups):
71 self.optimizer.param_groups = param_groups
72
73 @property
74 def defaults(self):
75 return self.optimizer.defaults
76
77 @defaults.setter
78 def defaults(self, defaults):
79 self.optimizer.defaults = defaults
80
81 def add_param_group(self, param_group):
82 self.optimizer.add_param_group(param_group)
83
84 def load_state_dict(self, state_dict):
85 if self.state.distributed_type == DistributedType.TPU and self.device_placement:
86 xm.send_cpu_data_to_device(state_dict, self.state.device)
87 self.optimizer.load_state_dict(state_dict)
88
89 def state_dict(self):
90 return self.optimizer.state_dict()
91
92 def zero_grad(self, set_to_none=None):
93 if version.parse(torch.__version__) < version.parse("1.7.0"):
94 if set_to_none is not None:
95 raise ValueError(
96 "`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for "
97 f"earlier versions (found version {torch.__version__})."
98 )
99 self.optimizer.zero_grad()
100 else:
101 if set_to_none is not None:
102 set_to_none = False
103 self.optimizer.zero_grad(set_to_none=set_to_none)
104
105 def step(self, closure=None):
106 if self.state.distributed_type == DistributedType.TPU:
107 optimizer_args = {"closure": closure} if closure is not None else {}
108 xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)
109 elif self.scaler is not None:
110 self.scaler.step(self.optimizer, closure)
111 self.scaler.update()
112 else:
113 self.optimizer.step(closure)
114
115 def _switch_parameters(self, parameters_map):
116 for param_group in self.optimizer.param_groups:
117 param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]
118
119 @property
120 def is_overflow(self):
121 """This needs to be implemented at the end"""
122 return False # TODO: implement it
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py
--- a/src/accelerate/optimizer.py
+++ b/src/accelerate/optimizer.py
@@ -52,6 +52,7 @@
self.optimizer = optimizer
self.scaler = scaler
self.state = AcceleratorState()
+ self.device_placement = device_placement
# Handle device placement
if device_placement:
| {"golden_diff": "diff --git a/src/accelerate/optimizer.py b/src/accelerate/optimizer.py\n--- a/src/accelerate/optimizer.py\n+++ b/src/accelerate/optimizer.py\n@@ -52,6 +52,7 @@\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n+ self.device_placement = device_placement\n \n # Handle device placement\n if device_placement:\n", "issue": "Error when loading optimizer state\nThanks for this awesome product!!\r\n\r\nWhen I try to load optimizer state on TPUs, I get an error, since self.device_placement is never initialized in AcceleratedOptimizer..!\r\nhttps://github.com/huggingface/accelerate/blob/e0a420f7cb32124cadeeae690b56e463f8fc598f/src/accelerate/optimizer.py#L83-L86\nError when loading optimizer state\nThanks for this awesome product!!\r\n\r\nWhen I try to load optimizer state on TPUs, I get an error, since self.device_placement is never initialized in AcceleratedOptimizer..!\r\nhttps://github.com/huggingface/accelerate/blob/e0a420f7cb32124cadeeae690b56e463f8fc598f/src/accelerate/optimizer.py#L83-L86\n", "before_files": [{"content": "# Copyright 2021 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom packaging import version\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\nfrom .utils import honor_type\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return honor_type(state, (move_to_device(t, device) for t in state))\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. 
If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n if set_to_none is not None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n\n def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n optimizer_args = {\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n else:\n self.optimizer.step(closure)\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n\n @property\n def is_overflow(self):\n \"\"\"This needs to be implemented at the end\"\"\"\n return False # TODO: implement it\n", "path": "src/accelerate/optimizer.py"}], "after_files": [{"content": "# Copyright 2021 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\n\nfrom packaging import version\n\nfrom .state import AcceleratorState, DistributedType, is_tpu_available\nfrom .utils import honor_type\n\n\nif is_tpu_available():\n import torch_xla.core.xla_model as xm\n\n\ndef move_to_device(state, device):\n if isinstance(state, (list, tuple)):\n return honor_type(state, (move_to_device(t, device) for t in state))\n elif isinstance(state, dict):\n return type(state)({k: move_to_device(v, device) for k, v in state.items()})\n elif isinstance(state, torch.Tensor):\n return state.to(device)\n return state\n\n\nclass AcceleratedOptimizer(torch.optim.Optimizer):\n \"\"\"\n Internal wrapper around a torch optimizer.\n\n Args:\n optimizer (:obj:`torch.optim.optimizer.Optimizer`):\n The optimizer to wrap.\n device_placement (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of\n :obj:`optimizer` on the right device.\n scaler (:obj:`torch.cuda.amp.grad_scaler.GradScaler`, `optional`):\n The scaler to use in the step function if training with mixed precision.\n \"\"\"\n\n def __init__(self, optimizer, device_placement=True, scaler=None):\n self.optimizer = optimizer\n self.scaler = scaler\n self.state = AcceleratorState()\n self.device_placement = device_placement\n\n # Handle device placement\n if device_placement:\n state_dict = self.optimizer.state_dict()\n if self.state.distributed_type == DistributedType.TPU:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n else:\n state_dict = move_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n @property\n def param_groups(self):\n return self.optimizer.param_groups\n\n @param_groups.setter\n def param_groups(self, param_groups):\n self.optimizer.param_groups = param_groups\n\n @property\n def defaults(self):\n return self.optimizer.defaults\n\n @defaults.setter\n def defaults(self, defaults):\n self.optimizer.defaults = defaults\n\n def add_param_group(self, param_group):\n self.optimizer.add_param_group(param_group)\n\n def load_state_dict(self, state_dict):\n if self.state.distributed_type == DistributedType.TPU and self.device_placement:\n xm.send_cpu_data_to_device(state_dict, self.state.device)\n self.optimizer.load_state_dict(state_dict)\n\n def state_dict(self):\n return self.optimizer.state_dict()\n\n def zero_grad(self, set_to_none=None):\n if version.parse(torch.__version__) < version.parse(\"1.7.0\"):\n if set_to_none is not None:\n raise ValueError(\n \"`set_to_none` for Optimizer.zero_grad` was introduced in PyTorch 1.7.0 and can't be used for \"\n f\"earlier versions (found version {torch.__version__}).\"\n )\n self.optimizer.zero_grad()\n else:\n if set_to_none is not None:\n set_to_none = False\n self.optimizer.zero_grad(set_to_none=set_to_none)\n\n def step(self, closure=None):\n if self.state.distributed_type == DistributedType.TPU:\n optimizer_args = 
{\"closure\": closure} if closure is not None else {}\n xm.optimizer_step(self.optimizer, optimizer_args=optimizer_args)\n elif self.scaler is not None:\n self.scaler.step(self.optimizer, closure)\n self.scaler.update()\n else:\n self.optimizer.step(closure)\n\n def _switch_parameters(self, parameters_map):\n for param_group in self.optimizer.param_groups:\n param_group[\"params\"] = [parameters_map.get(p, p) for p in param_group[\"params\"]]\n\n @property\n def is_overflow(self):\n \"\"\"This needs to be implemented at the end\"\"\"\n return False # TODO: implement it\n", "path": "src/accelerate/optimizer.py"}]} | 1,705 | 93 |
gh_patches_debug_15427 | rasdani/github-patches | git_diff | google__jax-574 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
jax.config doesn't report command-line flags correctly
Another one from @jmgilmer and me - I don't think the jax config is parsing command-line flags correctly. I don't know if this is functionally important or just a reporting error - but it is certainly important for user scripts to know what flags have been set.
If I run this script:
```
from absl import app, flags
from jax.config import config
FLAGS = flags.FLAGS
def main(_):
print("FLAGS.jax_enable_x64", FLAGS.jax_enable_x64)
print("FLAGS.jax_debug_nans", FLAGS.jax_debug_nans)
print(config.values)
if __name__ == "__main__":
config.config_with_absl()
app.run(main)
```
I get the following problem: jax.config doesn't report the correct flag settings.
```
> python jaxtest.py --jax_enable_x64=1 --jax_debug_nans=1
FLAGS.jax_enable_x64 True
FLAGS.jax_debug_nans True
{'jax_enable_x64': 0, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 0, 'jax_disable_jit': 0}
```
if I run the same with envvars instead it works:
```
JAX_ENABLE_X64=1 JAX_DEBUG_NANS=1 python jaxtest.py
FLAGS.jax_enable_x64 True
FLAGS.jax_debug_nans True
{'jax_enable_x64': 1, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 1, 'jax_disable_jit': 0}
```
I've tried parsing the flags in different ways but nothing seems to fix the issue.
--- END ISSUE ---
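For illustration, a hedged workaround sketch for the behaviour described above: after absl has parsed `sys.argv`, copy the parsed values back into `config.values` using the `complete_absl_config()` helper shown in `jax/config.py` below:
```python
from absl import app, flags
from jax.config import config

FLAGS = flags.FLAGS


def main(_):
    # At this point absl has parsed argv; sync jax's config.values with it.
    config.complete_absl_config(flags)
    print(FLAGS.jax_enable_x64, config.values["jax_enable_x64"])


if __name__ == "__main__":
    config.config_with_absl()
    app.run(main)
```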
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jax/config.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import sys
16
17
18 class Config(object):
19 def __init__(self):
20 self.values = {}
21 self.meta = {}
22 self.FLAGS = NameSpace(self.read)
23 self.use_absl = False
24
25 def update(self, name, val):
26 self.check_exists(name)
27 if name not in self.values:
28 raise Exception("Unrecognized config option: {}".format(name))
29 self.values[name] = val
30
31 def read(self, name):
32 if self.use_absl:
33 return getattr(self.absl_flags.FLAGS, name)
34 else:
35 self.check_exists(name)
36 return self.values[name]
37
38 def add_option(self, name, default, opt_type, meta_args, meta_kwargs):
39 if name in self.values:
40 raise Exception("Config option {} already defined".format(name))
41 self.values[name] = default
42 self.meta[name] = (opt_type, meta_args, meta_kwargs)
43
44 def check_exists(self, name):
45 if name not in self.values:
46 raise Exception("Unrecognized config option: {}".format(name))
47
48 def DEFINE_bool(self, name, default, *args, **kwargs):
49 self.add_option(name, default, bool, args, kwargs)
50
51 def DEFINE_integer(self, name, default, *args, **kwargs):
52 self.add_option(name, default, int, args, kwargs)
53
54 def DEFINE_string(self, name, default, *args, **kwargs):
55 self.add_option(name, default, str, args, kwargs)
56
57 def DEFINE_enum(self, name, default, *args, **kwargs):
58 self.add_option(name, default, 'enum', args, kwargs)
59
60 def config_with_absl(self):
61 # Run this before calling `app.run(main)` etc
62 import absl.flags as absl_FLAGS
63 from absl import app, flags as absl_flags
64
65 self.use_absl = True
66 self.absl_flags = absl_flags
67 absl_defs = { bool: absl_flags.DEFINE_bool,
68 int: absl_flags.DEFINE_integer,
69 str: absl_flags.DEFINE_string,
70 'enum': absl_flags.DEFINE_enum }
71
72 for name, val in self.values.items():
73 flag_type, meta_args, meta_kwargs = self.meta[name]
74 absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)
75
76 def complete_absl_config(self, absl_flags):
77 for name, _ in self.values.items():
78 self.update(name, getattr(absl_flags.FLAGS, name))
79
80 def parse_flags_with_absl(self):
81 global already_configured_with_absl
82 if not already_configured_with_absl:
83 import absl.flags
84 self.config_with_absl()
85 absl.flags.FLAGS(sys.argv)
86 already_configured_with_absl = True
87
88
89 class NameSpace(object):
90 def __init__(self, getter):
91 self._getter = getter
92
93 def __getattr__(self, name):
94 return self._getter(name)
95
96
97 config = Config()
98 flags = config
99 already_configured_with_absl = False
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jax/config.py b/jax/config.py
--- a/jax/config.py
+++ b/jax/config.py
@@ -73,6 +73,8 @@
flag_type, meta_args, meta_kwargs = self.meta[name]
absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)
+ app.call_after_init(lambda: self.complete_absl_config(absl_flags))
+
def complete_absl_config(self, absl_flags):
for name, _ in self.values.items():
self.update(name, getattr(absl_flags.FLAGS, name))
@@ -83,6 +85,7 @@
import absl.flags
self.config_with_absl()
absl.flags.FLAGS(sys.argv)
+ self.complete_absl_config(absl.flags)
already_configured_with_absl = True
| {"golden_diff": "diff --git a/jax/config.py b/jax/config.py\n--- a/jax/config.py\n+++ b/jax/config.py\n@@ -73,6 +73,8 @@\n flag_type, meta_args, meta_kwargs = self.meta[name]\n absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)\n \n+ app.call_after_init(lambda: self.complete_absl_config(absl_flags))\n+\n def complete_absl_config(self, absl_flags):\n for name, _ in self.values.items():\n self.update(name, getattr(absl_flags.FLAGS, name))\n@@ -83,6 +85,7 @@\n import absl.flags\n self.config_with_absl()\n absl.flags.FLAGS(sys.argv)\n+ self.complete_absl_config(absl.flags)\n already_configured_with_absl = True\n", "issue": "jax.config doesn't report command-line flags correctly\nAnother one from @jmgilmer and I - I don't think the jax config is parsing command line flags correctly. I don't know if this is functionally important or just a reporting error - but it is certainly important for user scripts knowing what flags have been set.\r\n\r\nIf I run this script:\r\n```\r\nfrom absl import app, flags\r\nfrom jax.config import config\r\nFLAGS = flags.FLAGS\r\ndef main(_):\r\n print(\"FLAGS.jax_enable_x64\", FLAGS.jax_enable_x64)\r\n print(\"FLAGS.jax_debug_nans\", FLAGS.jax_debug_nans)\r\n print(config.values)\r\nif __name__ == \"__main__\":\r\n config.config_with_absl()\r\n app.run(main)\r\n```\r\nI get the following problem: jax.config doesn't report the correct flag settings.\r\n```\r\n> python jaxtest.py --jax_enable_x64=1 --jax_debug_nans=1\r\nFLAGS.jax_enable_x64 True\r\nFLAGS.jax_debug_nans True\r\n{'jax_enable_x64': 0, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 0, 'jax_disable_jit': 0}\r\n```\r\nif I run the same with envvars instead it works:\r\n```\r\nJAX_ENABLE_X64=1 JAX_DEBUG_NANS=1 python jaxtest.py\r\nFLAGS.jax_enable_x64 True\r\nFLAGS.jax_debug_nans True\r\n{'jax_enable_x64': 1, 'jax_xla_backend': 'xla', 'jax_backend_target': 'local', 'jax_platform_name': '', 'jax_device_values': 1, 'jax_debug_nans': 1, 'jax_disable_jit': 0}\r\n```\r\n\r\nI've tried parsing the flags in different ways but nothing seems to fix the issue.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\n\nclass Config(object):\n def __init__(self):\n self.values = {}\n self.meta = {}\n self.FLAGS = NameSpace(self.read)\n self.use_absl = False\n\n def update(self, name, val):\n self.check_exists(name)\n if name not in self.values:\n raise Exception(\"Unrecognized config option: {}\".format(name))\n self.values[name] = val\n\n def read(self, name):\n if self.use_absl:\n return getattr(self.absl_flags.FLAGS, name)\n else:\n self.check_exists(name)\n return self.values[name]\n\n def add_option(self, name, default, opt_type, meta_args, meta_kwargs):\n if name in self.values:\n raise Exception(\"Config option {} already defined\".format(name))\n self.values[name] = default\n self.meta[name] = (opt_type, meta_args, meta_kwargs)\n\n def check_exists(self, 
name):\n if name not in self.values:\n raise Exception(\"Unrecognized config option: {}\".format(name))\n\n def DEFINE_bool(self, name, default, *args, **kwargs):\n self.add_option(name, default, bool, args, kwargs)\n\n def DEFINE_integer(self, name, default, *args, **kwargs):\n self.add_option(name, default, int, args, kwargs)\n\n def DEFINE_string(self, name, default, *args, **kwargs):\n self.add_option(name, default, str, args, kwargs)\n\n def DEFINE_enum(self, name, default, *args, **kwargs):\n self.add_option(name, default, 'enum', args, kwargs)\n\n def config_with_absl(self):\n # Run this before calling `app.run(main)` etc\n import absl.flags as absl_FLAGS\n from absl import app, flags as absl_flags\n\n self.use_absl = True\n self.absl_flags = absl_flags\n absl_defs = { bool: absl_flags.DEFINE_bool,\n int: absl_flags.DEFINE_integer,\n str: absl_flags.DEFINE_string,\n 'enum': absl_flags.DEFINE_enum }\n\n for name, val in self.values.items():\n flag_type, meta_args, meta_kwargs = self.meta[name]\n absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)\n\n def complete_absl_config(self, absl_flags):\n for name, _ in self.values.items():\n self.update(name, getattr(absl_flags.FLAGS, name))\n\n def parse_flags_with_absl(self):\n global already_configured_with_absl\n if not already_configured_with_absl:\n import absl.flags\n self.config_with_absl()\n absl.flags.FLAGS(sys.argv)\n already_configured_with_absl = True\n\n\nclass NameSpace(object):\n def __init__(self, getter):\n self._getter = getter\n\n def __getattr__(self, name):\n return self._getter(name)\n\n\nconfig = Config()\nflags = config\nalready_configured_with_absl = False\n", "path": "jax/config.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\n\nclass Config(object):\n def __init__(self):\n self.values = {}\n self.meta = {}\n self.FLAGS = NameSpace(self.read)\n self.use_absl = False\n\n def update(self, name, val):\n self.check_exists(name)\n if name not in self.values:\n raise Exception(\"Unrecognized config option: {}\".format(name))\n self.values[name] = val\n\n def read(self, name):\n if self.use_absl:\n return getattr(self.absl_flags.FLAGS, name)\n else:\n self.check_exists(name)\n return self.values[name]\n\n def add_option(self, name, default, opt_type, meta_args, meta_kwargs):\n if name in self.values:\n raise Exception(\"Config option {} already defined\".format(name))\n self.values[name] = default\n self.meta[name] = (opt_type, meta_args, meta_kwargs)\n\n def check_exists(self, name):\n if name not in self.values:\n raise Exception(\"Unrecognized config option: {}\".format(name))\n\n def DEFINE_bool(self, name, default, *args, **kwargs):\n self.add_option(name, default, bool, args, kwargs)\n\n def DEFINE_integer(self, name, default, *args, **kwargs):\n self.add_option(name, default, int, args, kwargs)\n\n def DEFINE_string(self, name, default, *args, **kwargs):\n self.add_option(name, default, str, args, kwargs)\n\n def 
DEFINE_enum(self, name, default, *args, **kwargs):\n self.add_option(name, default, 'enum', args, kwargs)\n\n def config_with_absl(self):\n # Run this before calling `app.run(main)` etc\n import absl.flags as absl_FLAGS\n from absl import app, flags as absl_flags\n\n self.use_absl = True\n self.absl_flags = absl_flags\n absl_defs = { bool: absl_flags.DEFINE_bool,\n int: absl_flags.DEFINE_integer,\n str: absl_flags.DEFINE_string,\n 'enum': absl_flags.DEFINE_enum }\n\n for name, val in self.values.items():\n flag_type, meta_args, meta_kwargs = self.meta[name]\n absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)\n\n app.call_after_init(lambda: self.complete_absl_config(absl_flags))\n\n def complete_absl_config(self, absl_flags):\n for name, _ in self.values.items():\n self.update(name, getattr(absl_flags.FLAGS, name))\n\n def parse_flags_with_absl(self):\n global already_configured_with_absl\n if not already_configured_with_absl:\n import absl.flags\n self.config_with_absl()\n absl.flags.FLAGS(sys.argv)\n self.complete_absl_config(absl.flags)\n already_configured_with_absl = True\n\n\nclass NameSpace(object):\n def __init__(self, getter):\n self._getter = getter\n\n def __getattr__(self, name):\n return self._getter(name)\n\n\nconfig = Config()\nflags = config\nalready_configured_with_absl = False\n", "path": "jax/config.py"}]} | 1,641 | 180 |
gh_patches_debug_15851 | rasdani/github-patches | git_diff | lk-geimfari__mimesis-875 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add pricetag support
# Feature request
## Thesis
Create a localized pricetag:
```
>>> mimesis.Business().pricetag()
"$ 12.34"
>>> mimesis.Business("de").pricetag(minimum=1000, maximum=1500)
"1.234,56 €"
>>> mimesis.Business("cs").pricetag()
"75,20 Kč" # rounded to 0.1
```
with defaults minimum=0.01 and maximum=1000.00 as a proposal for the default provider. Localized providers would define their own values, because of huge differences between the used values, such as USD/EUR/GBP going down to 0.01, and some minor currencies using millions as base.
## Reasoning
This is very locale dependent. Currency is already in the `CURRENCY_SYMBOLS`, but what would be the best place for localized number and price formatting as well as the localized defaults for minimum/maximum?
--- END ISSUE ---
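A sketch of what such a localized pricetag could look like is below. It is illustrative only: the `price-format`, `numeric-decimal`, `numeric-thousands` and `numeric-frac-digits` keys are assumed to be supplied by each locale's `business.json` data file, which is not shown here.
```python
# Sketch only. Assumes the locale data exposes "price-format" (e.g. "# €"),
# "numeric-decimal", "numeric-thousands" and "numeric-frac-digits" entries.
def pricetag(self, minimum: float = 0.01, maximum: float = 1000.00) -> str:
    value = self.random.uniform(minimum, maximum)
    # Format with "," thousands and "." decimal first, then swap in the
    # locale-specific delimiters character by character.
    formatted = "{:,.{}f}".format(value, self._data["numeric-frac-digits"])
    delims = {
        ".": self._data["numeric-decimal"],
        ",": self._data["numeric-thousands"],
    }
    localized = "".join(delims.get(char, char) for char in formatted)
    return self._data["price-format"].replace("#", localized)
```
For a German locale this would turn `1234.56` into `1.234,56 €`, matching the example in the issue; the same formatting could equally be applied to the existing `price()` method.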
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mimesis/providers/business.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Business data provider."""
4
5 from mimesis.data import (
6 CRYPTOCURRENCY_ISO_CODES,
7 CRYPTOCURRENCY_SYMBOLS,
8 CURRENCY_ISO_CODES,
9 CURRENCY_SYMBOLS,
10 )
11 from mimesis.providers.base import BaseDataProvider
12
13 __all__ = ['Business']
14
15
16 class Business(BaseDataProvider):
17 """Class for generating data for business."""
18
19 def __init__(self, *args, **kwargs):
20 """Initialize attributes.
21
22 :param locale: Current locale.
23 """
24 super().__init__(*args, **kwargs)
25 self._datafile = 'business.json'
26 self.pull(self._datafile)
27
28 class Meta:
29 """Class for metadata."""
30
31 name = 'business'
32
33 def company(self) -> str:
34 """Get a random company name.
35
36 :return: Company name.
37 """
38 return self.random.choice(self._data['company']['name'])
39
40 def company_type(self, abbr: bool = False) -> str:
41 """Get a random type of business entity.
42
43 :param abbr: Abbreviated company type.
44 :return: Types of business entity.
45 """
46 key = 'abbr' if abbr else 'title'
47 return self.random.choice(
48 self._data['company']['type'][key],
49 )
50
51 def copyright(self) -> str: # noqa: A003
52 """Generate a random copyright.
53
54 :return: Copyright of company.
55 """
56 return '© {}, {}'.format(
57 self.company(),
58 self.company_type(abbr=True),
59 )
60
61 def currency_iso_code(self, allow_random: bool = False) -> str:
62 """Get code of the currency for current locale.
63
64 :param allow_random: Get a random ISO code.
65 :return: Currency code.
66 """
67 if allow_random:
68 return self.random.choice(CURRENCY_ISO_CODES)
69 else:
70 return self._data['currency-code']
71
72 def cryptocurrency_iso_code(self) -> str:
73 """Get symbol of random cryptocurrency.
74
75 :return: Symbol of cryptocurrency.
76 """
77 return self.random.choice(CRYPTOCURRENCY_ISO_CODES)
78
79 def currency_symbol(self):
80 """Get a currency symbol for current locale.
81
82 :return: Currency symbol.
83 """
84 return CURRENCY_SYMBOLS[self.locale]
85
86 def cryptocurrency_symbol(self) -> str:
87 """Get a cryptocurrency symbol.
88
89 :return: Symbol of cryptocurrency.
90 """
91 return self.random.choice(CRYPTOCURRENCY_SYMBOLS)
92
93 def price(self, minimum: float = 10.00,
94 maximum: float = 1000.00) -> str:
95 """Generate a random price.
96
97 :param minimum: Max value of price.
98 :param maximum: Min value of price.
99 :return: Price.
100 """
101 price = self.random.uniform(minimum, maximum, precision=2)
102 return '{0} {1}'.format(price, self.currency_symbol())
103
104 def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:
105 """Generate random price in BTC.
106
107 :param minimum: Minimum value of price.
108 :param maximum: Maximum value of price.
109 :return: Price in BTC.
110 """
111 return '{} BTC'.format(
112 self.random.uniform(
113 minimum,
114 maximum,
115 precision=7,
116 ),
117 )
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mimesis/providers/business.py b/mimesis/providers/business.py
--- a/mimesis/providers/business.py
+++ b/mimesis/providers/business.py
@@ -98,8 +98,19 @@
:param maximum: Min value of price.
:return: Price.
"""
- price = self.random.uniform(minimum, maximum, precision=2)
- return '{0} {1}'.format(price, self.currency_symbol())
+ price_format = self._data['price-format']
+ numeric_frac_digits = self._data['numeric-frac-digits']
+ delims = {
+ '.': self._data['numeric-decimal'],
+ ',': self._data['numeric-thousands'],
+ }
+
+ value = self.random.uniform(minimum, maximum)
+ price = '{:,.{}f}'.format(value, numeric_frac_digits)
+
+ price = ''.join(delims.get(char, char) for char in price)
+
+ return price_format.replace('#', price)
def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:
"""Generate random price in BTC.
| {"golden_diff": "diff --git a/mimesis/providers/business.py b/mimesis/providers/business.py\n--- a/mimesis/providers/business.py\n+++ b/mimesis/providers/business.py\n@@ -98,8 +98,19 @@\n :param maximum: Min value of price.\n :return: Price.\n \"\"\"\n- price = self.random.uniform(minimum, maximum, precision=2)\n- return '{0} {1}'.format(price, self.currency_symbol())\n+ price_format = self._data['price-format']\n+ numeric_frac_digits = self._data['numeric-frac-digits']\n+ delims = {\n+ '.': self._data['numeric-decimal'],\n+ ',': self._data['numeric-thousands'],\n+ }\n+\n+ value = self.random.uniform(minimum, maximum)\n+ price = '{:,.{}f}'.format(value, numeric_frac_digits)\n+\n+ price = ''.join(delims.get(char, char) for char in price)\n+\n+ return price_format.replace('#', price)\n \n def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:\n \"\"\"Generate random price in BTC.\n", "issue": "Add pricetag support\n# Feature request\r\n## Thesis\r\n\r\nCreate a localized pricetag:\r\n\r\n```\r\n>>> mimesis.Business().pricetag()\r\n\"$ 12.34\"\r\n>>> mimesis.Business(\"de\").pricetag(minimum=1000, maximum=1500)\r\n\"1.234,56 \u20ac\"\r\n>>> mimesis.Business(\"cs\").pricetag()\r\n\"75,20 K\u010d\" # rounded to 0.1\r\n```\r\nwith defaults minimum=0.01 and maximum=1000.00 as a proposal for the default provider. Localized providers would define their own values, because of huge differences between the used values, such as USD/EUR/GBP going down to 0.01, and some minor currencies using millions as base.\r\n\r\n## Reasoning\r\n\r\nThis is very locale dependent. Currency is already in the `CURRENCY_SYMBOLS`, but what would be the best place for localized number and price formatting as well as the localized defaults for minimum/maximum?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Business data provider.\"\"\"\n\nfrom mimesis.data import (\n CRYPTOCURRENCY_ISO_CODES,\n CRYPTOCURRENCY_SYMBOLS,\n CURRENCY_ISO_CODES,\n CURRENCY_SYMBOLS,\n)\nfrom mimesis.providers.base import BaseDataProvider\n\n__all__ = ['Business']\n\n\nclass Business(BaseDataProvider):\n \"\"\"Class for generating data for business.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize attributes.\n\n :param locale: Current locale.\n \"\"\"\n super().__init__(*args, **kwargs)\n self._datafile = 'business.json'\n self.pull(self._datafile)\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'business'\n\n def company(self) -> str:\n \"\"\"Get a random company name.\n\n :return: Company name.\n \"\"\"\n return self.random.choice(self._data['company']['name'])\n\n def company_type(self, abbr: bool = False) -> str:\n \"\"\"Get a random type of business entity.\n\n :param abbr: Abbreviated company type.\n :return: Types of business entity.\n \"\"\"\n key = 'abbr' if abbr else 'title'\n return self.random.choice(\n self._data['company']['type'][key],\n )\n\n def copyright(self) -> str: # noqa: A003\n \"\"\"Generate a random copyright.\n\n :return: Copyright of company.\n \"\"\"\n return '\u00a9 {}, {}'.format(\n self.company(),\n self.company_type(abbr=True),\n )\n\n def currency_iso_code(self, allow_random: bool = False) -> str:\n \"\"\"Get code of the currency for current locale.\n\n :param allow_random: Get a random ISO code.\n :return: Currency code.\n \"\"\"\n if allow_random:\n return self.random.choice(CURRENCY_ISO_CODES)\n else:\n return self._data['currency-code']\n\n def cryptocurrency_iso_code(self) -> str:\n \"\"\"Get symbol of random cryptocurrency.\n\n :return: 
Symbol of cryptocurrency.\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_ISO_CODES)\n\n def currency_symbol(self):\n \"\"\"Get a currency symbol for current locale.\n\n :return: Currency symbol.\n \"\"\"\n return CURRENCY_SYMBOLS[self.locale]\n\n def cryptocurrency_symbol(self) -> str:\n \"\"\"Get a cryptocurrency symbol.\n\n :return: Symbol of cryptocurrency.\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_SYMBOLS)\n\n def price(self, minimum: float = 10.00,\n maximum: float = 1000.00) -> str:\n \"\"\"Generate a random price.\n\n :param minimum: Max value of price.\n :param maximum: Min value of price.\n :return: Price.\n \"\"\"\n price = self.random.uniform(minimum, maximum, precision=2)\n return '{0} {1}'.format(price, self.currency_symbol())\n\n def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:\n \"\"\"Generate random price in BTC.\n\n :param minimum: Minimum value of price.\n :param maximum: Maximum value of price.\n :return: Price in BTC.\n \"\"\"\n return '{} BTC'.format(\n self.random.uniform(\n minimum,\n maximum,\n precision=7,\n ),\n )\n", "path": "mimesis/providers/business.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Business data provider.\"\"\"\n\nfrom mimesis.data import (\n CRYPTOCURRENCY_ISO_CODES,\n CRYPTOCURRENCY_SYMBOLS,\n CURRENCY_ISO_CODES,\n CURRENCY_SYMBOLS,\n)\nfrom mimesis.providers.base import BaseDataProvider\n\n__all__ = ['Business']\n\n\nclass Business(BaseDataProvider):\n \"\"\"Class for generating data for business.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize attributes.\n\n :param locale: Current locale.\n \"\"\"\n super().__init__(*args, **kwargs)\n self._datafile = 'business.json'\n self.pull(self._datafile)\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'business'\n\n def company(self) -> str:\n \"\"\"Get a random company name.\n\n :return: Company name.\n \"\"\"\n return self.random.choice(self._data['company']['name'])\n\n def company_type(self, abbr: bool = False) -> str:\n \"\"\"Get a random type of business entity.\n\n :param abbr: Abbreviated company type.\n :return: Types of business entity.\n \"\"\"\n key = 'abbr' if abbr else 'title'\n return self.random.choice(\n self._data['company']['type'][key],\n )\n\n def copyright(self) -> str: # noqa: A003\n \"\"\"Generate a random copyright.\n\n :return: Copyright of company.\n \"\"\"\n return '\u00a9 {}, {}'.format(\n self.company(),\n self.company_type(abbr=True),\n )\n\n def currency_iso_code(self, allow_random: bool = False) -> str:\n \"\"\"Get code of the currency for current locale.\n\n :param allow_random: Get a random ISO code.\n :return: Currency code.\n \"\"\"\n if allow_random:\n return self.random.choice(CURRENCY_ISO_CODES)\n else:\n return self._data['currency-code']\n\n def cryptocurrency_iso_code(self) -> str:\n \"\"\"Get symbol of random cryptocurrency.\n\n :return: Symbol of cryptocurrency.\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_ISO_CODES)\n\n def currency_symbol(self):\n \"\"\"Get a currency symbol for current locale.\n\n :return: Currency symbol.\n \"\"\"\n return CURRENCY_SYMBOLS[self.locale]\n\n def cryptocurrency_symbol(self) -> str:\n \"\"\"Get a cryptocurrency symbol.\n\n :return: Symbol of cryptocurrency.\n \"\"\"\n return self.random.choice(CRYPTOCURRENCY_SYMBOLS)\n\n def price(self, minimum: float = 10.00,\n maximum: float = 1000.00) -> str:\n \"\"\"Generate a random price.\n\n :param minimum: Max value of price.\n :param maximum: Min value of price.\n :return: Price.\n 
\"\"\"\n price_format = self._data['price-format']\n numeric_frac_digits = self._data['numeric-frac-digits']\n delims = {\n '.': self._data['numeric-decimal'],\n ',': self._data['numeric-thousands'],\n }\n\n value = self.random.uniform(minimum, maximum)\n price = '{:,.{}f}'.format(value, numeric_frac_digits)\n\n price = ''.join(delims.get(char, char) for char in price)\n\n return price_format.replace('#', price)\n\n def price_in_btc(self, minimum: float = 0, maximum: float = 2) -> str:\n \"\"\"Generate random price in BTC.\n\n :param minimum: Minimum value of price.\n :param maximum: Maximum value of price.\n :return: Price in BTC.\n \"\"\"\n return '{} BTC'.format(\n self.random.uniform(\n minimum,\n maximum,\n precision=7,\n ),\n )\n", "path": "mimesis/providers/business.py"}]} | 1,459 | 254 |
gh_patches_debug_26099 | rasdani/github-patches | git_diff | pytorch__ignite-1044 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gpu_info crashes because it cannot parse "N/A"
## 🐛 Bug description
When trying to use gpu_info, it throws:
```
File "/home/blackhc/anaconda3/envs/hello-mnist/lib/python3.7/site-packages/ignite/contrib/metrics/gpu_info.py", line 91, in completed
engine.state.metrics[util_name] = int(util_report['gpu_util'])
ValueError: invalid literal for int() with base 10: 'N/A'
```
There is error handling code above it, but it does not catch the issue ("N/A" is returned).
I assume my GPU does not support it. However, it would be neat to have a graceful failure mode.
Thank you!
Andreas
## Environment
torch 1.5 on a GTX 780 TI (source)
ignite 0.3.0 (conda)
pynvml 8.0.4 (pip)
--- END ISSUE ---
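The graceful failure mode asked for above can be as small as tolerating a non-numeric reading where the metric is set; a sketch, mirroring the structure of `completed()` shown below, would be:
```python
# Sketch: skip the utilization metric instead of crashing when the NVML
# report contains "N/A" for gpu_util.
try:
    engine.state.metrics[util_name] = int(util_report["gpu_util"])
except ValueError:
    # Utilization is not supported/reported on this GPU; leave it unset.
    pass
```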
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/contrib/metrics/gpu_info.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import warnings
3
4 import torch
5
6 from ignite.engine import Events
7 from ignite.metrics import Metric
8
9
10 class GpuInfo(Metric):
11 """Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric
12 on each iterations.
13
14 Examples:
15
16 .. code-block:: python
17
18 # Default GPU measurements
19 GpuInfo().attach(trainer, name='gpu') # metric names are 'gpu:X mem(%)', 'gpu:X util(%)'
20
21 # Logging with TQDM
22 ProgressBar(persist=True).attach(trainer, metric_names=['gpu:0 mem(%)', 'gpu:0 util(%)'])
23 # Progress bar will looks like
24 # Epoch [2/10]: [12/24] 50%|█████ , gpu:0 mem(%)=79, gpu:0 util(%)=59 [00:17<1:23]
25
26 # Logging with Tensorboard
27 tb_logger.attach(trainer,
28 log_handler=OutputHandler(tag="training", metric_names='all'),
29 event_name=Events.ITERATION_COMPLETED)
30 """
31
32 def __init__(self):
33 try:
34 import pynvml
35 except ImportError:
36 raise RuntimeError(
37 "This contrib module requires pynvml to be installed. "
38 "Please install it with command: \n pip install pynvml"
39 )
40 # Let's check available devices
41 if not torch.cuda.is_available():
42 raise RuntimeError("This contrib module requires available GPU")
43
44 from pynvml.smi import nvidia_smi
45
46 # Let it fail if no libnvidia drivers or NMVL library found
47 self.nvsmi = nvidia_smi.getInstance()
48 super(GpuInfo, self).__init__()
49
50 def reset(self):
51 pass
52
53 def update(self, output):
54 pass
55
56 def compute(self):
57 data = self.nvsmi.DeviceQuery("memory.used, memory.total, utilization.gpu")
58 if len(data) == 0 or ("gpu" not in data):
59 warnings.warn("No GPU information available")
60 return []
61 return data["gpu"]
62
63 def completed(self, engine, name):
64 data = self.compute()
65 if len(data) < 1:
66 warnings.warn("No GPU information available")
67 return
68
69 for i, data_by_rank in enumerate(data):
70 mem_name = "{}:{} mem(%)".format(name, i)
71
72 if "fb_memory_usage" not in data_by_rank:
73 warnings.warn("No GPU memory usage information available in {}".format(data_by_rank))
74 continue
75 mem_report = data_by_rank["fb_memory_usage"]
76 if not ("used" in mem_report and "total" in mem_report):
77 warnings.warn(
78 "GPU memory usage information does not provide used/total "
79 "memory consumption information in {}".format(mem_report)
80 )
81 continue
82
83 engine.state.metrics[mem_name] = int(mem_report["used"] * 100.0 / mem_report["total"])
84
85 for i, data_by_rank in enumerate(data):
86 util_name = "{}:{} util(%)".format(name, i)
87 if "utilization" not in data_by_rank:
88 warnings.warn("No GPU utilization information available in {}".format(data_by_rank))
89 continue
90 util_report = data_by_rank["utilization"]
91 if not ("gpu_util" in util_report):
92 warnings.warn(
93 "GPU utilization information does not provide 'gpu_util' information in " "{}".format(util_report)
94 )
95 continue
96
97 engine.state.metrics[util_name] = int(util_report["gpu_util"])
98
99 def attach(self, engine, name="gpu", event_name=Events.ITERATION_COMPLETED):
100 engine.add_event_handler(event_name, self.completed, name)
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/contrib/metrics/gpu_info.py b/ignite/contrib/metrics/gpu_info.py
--- a/ignite/contrib/metrics/gpu_info.py
+++ b/ignite/contrib/metrics/gpu_info.py
@@ -11,6 +11,10 @@
"""Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric
on each iterations.
+ .. Note ::
+
+ In case if gpu utilization reports "N/A" on a given GPU, corresponding metric value is not set.
+
Examples:
.. code-block:: python
@@ -90,11 +94,14 @@
util_report = data_by_rank["utilization"]
if not ("gpu_util" in util_report):
warnings.warn(
- "GPU utilization information does not provide 'gpu_util' information in " "{}".format(util_report)
+ "GPU utilization information does not provide 'gpu_util' information in {}".format(util_report)
)
continue
-
- engine.state.metrics[util_name] = int(util_report["gpu_util"])
+ try:
+ engine.state.metrics[util_name] = int(util_report["gpu_util"])
+ except ValueError:
+ # Do not set GPU utilization information
+ pass
def attach(self, engine, name="gpu", event_name=Events.ITERATION_COMPLETED):
engine.add_event_handler(event_name, self.completed, name)
| {"golden_diff": "diff --git a/ignite/contrib/metrics/gpu_info.py b/ignite/contrib/metrics/gpu_info.py\n--- a/ignite/contrib/metrics/gpu_info.py\n+++ b/ignite/contrib/metrics/gpu_info.py\n@@ -11,6 +11,10 @@\n \"\"\"Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric\n on each iterations.\n \n+ .. Note ::\n+\n+ In case if gpu utilization reports \"N/A\" on a given GPU, corresponding metric value is not set.\n+\n Examples:\n \n .. code-block:: python\n@@ -90,11 +94,14 @@\n util_report = data_by_rank[\"utilization\"]\n if not (\"gpu_util\" in util_report):\n warnings.warn(\n- \"GPU utilization information does not provide 'gpu_util' information in \" \"{}\".format(util_report)\n+ \"GPU utilization information does not provide 'gpu_util' information in {}\".format(util_report)\n )\n continue\n-\n- engine.state.metrics[util_name] = int(util_report[\"gpu_util\"])\n+ try:\n+ engine.state.metrics[util_name] = int(util_report[\"gpu_util\"])\n+ except ValueError:\n+ # Do not set GPU utilization information\n+ pass\n \n def attach(self, engine, name=\"gpu\", event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(event_name, self.completed, name)\n", "issue": "gpu_info crashes because it cannot parse \"N/A\"\n## \ud83d\udc1b Bug description\r\n\r\nWhen trying to use gpu_info, it throws:\r\n\r\n```\r\n File \"/home/blackhc/anaconda3/envs/hello-mnist/lib/python3.7/site-packages/ignite/contrib/metrics/gpu_info.py\", line 91, in completed\r\n engine.state.metrics[util_name] = int(util_report['gpu_util'])\r\nValueError: invalid literal for int() with base 10: 'N/A'\r\n```\r\n\r\nThere is error handling code above it, but it does not catch the issue (\"N/A\" is returned).\r\n\r\nI assume my GPU does not support it. However, it would be neat to have a graceful failure mode.\r\n\r\nThank you!\r\nAndreas\r\n\r\n## Environment\r\n\r\ntorch 1.5 on a GTX 780 TI (source)\r\nignite 0.3.0 (conda)\r\npynvml 8.0.4 (pip)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\nfrom ignite.metrics import Metric\n\n\nclass GpuInfo(Metric):\n \"\"\"Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric\n on each iterations.\n\n Examples:\n\n .. code-block:: python\n\n # Default GPU measurements\n GpuInfo().attach(trainer, name='gpu') # metric names are 'gpu:X mem(%)', 'gpu:X util(%)'\n\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['gpu:0 mem(%)', 'gpu:0 util(%)'])\n # Progress bar will looks like\n # Epoch [2/10]: [12/24] 50%|\u2588\u2588\u2588\u2588\u2588 , gpu:0 mem(%)=79, gpu:0 util(%)=59 [00:17<1:23]\n\n # Logging with Tensorboard\n tb_logger.attach(trainer,\n log_handler=OutputHandler(tag=\"training\", metric_names='all'),\n event_name=Events.ITERATION_COMPLETED)\n \"\"\"\n\n def __init__(self):\n try:\n import pynvml\n except ImportError:\n raise RuntimeError(\n \"This contrib module requires pynvml to be installed. 
\"\n \"Please install it with command: \\n pip install pynvml\"\n )\n # Let's check available devices\n if not torch.cuda.is_available():\n raise RuntimeError(\"This contrib module requires available GPU\")\n\n from pynvml.smi import nvidia_smi\n\n # Let it fail if no libnvidia drivers or NMVL library found\n self.nvsmi = nvidia_smi.getInstance()\n super(GpuInfo, self).__init__()\n\n def reset(self):\n pass\n\n def update(self, output):\n pass\n\n def compute(self):\n data = self.nvsmi.DeviceQuery(\"memory.used, memory.total, utilization.gpu\")\n if len(data) == 0 or (\"gpu\" not in data):\n warnings.warn(\"No GPU information available\")\n return []\n return data[\"gpu\"]\n\n def completed(self, engine, name):\n data = self.compute()\n if len(data) < 1:\n warnings.warn(\"No GPU information available\")\n return\n\n for i, data_by_rank in enumerate(data):\n mem_name = \"{}:{} mem(%)\".format(name, i)\n\n if \"fb_memory_usage\" not in data_by_rank:\n warnings.warn(\"No GPU memory usage information available in {}\".format(data_by_rank))\n continue\n mem_report = data_by_rank[\"fb_memory_usage\"]\n if not (\"used\" in mem_report and \"total\" in mem_report):\n warnings.warn(\n \"GPU memory usage information does not provide used/total \"\n \"memory consumption information in {}\".format(mem_report)\n )\n continue\n\n engine.state.metrics[mem_name] = int(mem_report[\"used\"] * 100.0 / mem_report[\"total\"])\n\n for i, data_by_rank in enumerate(data):\n util_name = \"{}:{} util(%)\".format(name, i)\n if \"utilization\" not in data_by_rank:\n warnings.warn(\"No GPU utilization information available in {}\".format(data_by_rank))\n continue\n util_report = data_by_rank[\"utilization\"]\n if not (\"gpu_util\" in util_report):\n warnings.warn(\n \"GPU utilization information does not provide 'gpu_util' information in \" \"{}\".format(util_report)\n )\n continue\n\n engine.state.metrics[util_name] = int(util_report[\"gpu_util\"])\n\n def attach(self, engine, name=\"gpu\", event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(event_name, self.completed, name)\n", "path": "ignite/contrib/metrics/gpu_info.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\nfrom ignite.metrics import Metric\n\n\nclass GpuInfo(Metric):\n \"\"\"Provides GPU information: a) used memory percentage, b) gpu utilization percentage values as Metric\n on each iterations.\n\n .. Note ::\n\n In case if gpu utilization reports \"N/A\" on a given GPU, corresponding metric value is not set.\n\n Examples:\n\n .. code-block:: python\n\n # Default GPU measurements\n GpuInfo().attach(trainer, name='gpu') # metric names are 'gpu:X mem(%)', 'gpu:X util(%)'\n\n # Logging with TQDM\n ProgressBar(persist=True).attach(trainer, metric_names=['gpu:0 mem(%)', 'gpu:0 util(%)'])\n # Progress bar will looks like\n # Epoch [2/10]: [12/24] 50%|\u2588\u2588\u2588\u2588\u2588 , gpu:0 mem(%)=79, gpu:0 util(%)=59 [00:17<1:23]\n\n # Logging with Tensorboard\n tb_logger.attach(trainer,\n log_handler=OutputHandler(tag=\"training\", metric_names='all'),\n event_name=Events.ITERATION_COMPLETED)\n \"\"\"\n\n def __init__(self):\n try:\n import pynvml\n except ImportError:\n raise RuntimeError(\n \"This contrib module requires pynvml to be installed. 
\"\n \"Please install it with command: \\n pip install pynvml\"\n )\n # Let's check available devices\n if not torch.cuda.is_available():\n raise RuntimeError(\"This contrib module requires available GPU\")\n\n from pynvml.smi import nvidia_smi\n\n # Let it fail if no libnvidia drivers or NMVL library found\n self.nvsmi = nvidia_smi.getInstance()\n super(GpuInfo, self).__init__()\n\n def reset(self):\n pass\n\n def update(self, output):\n pass\n\n def compute(self):\n data = self.nvsmi.DeviceQuery(\"memory.used, memory.total, utilization.gpu\")\n if len(data) == 0 or (\"gpu\" not in data):\n warnings.warn(\"No GPU information available\")\n return []\n return data[\"gpu\"]\n\n def completed(self, engine, name):\n data = self.compute()\n if len(data) < 1:\n warnings.warn(\"No GPU information available\")\n return\n\n for i, data_by_rank in enumerate(data):\n mem_name = \"{}:{} mem(%)\".format(name, i)\n\n if \"fb_memory_usage\" not in data_by_rank:\n warnings.warn(\"No GPU memory usage information available in {}\".format(data_by_rank))\n continue\n mem_report = data_by_rank[\"fb_memory_usage\"]\n if not (\"used\" in mem_report and \"total\" in mem_report):\n warnings.warn(\n \"GPU memory usage information does not provide used/total \"\n \"memory consumption information in {}\".format(mem_report)\n )\n continue\n\n engine.state.metrics[mem_name] = int(mem_report[\"used\"] * 100.0 / mem_report[\"total\"])\n\n for i, data_by_rank in enumerate(data):\n util_name = \"{}:{} util(%)\".format(name, i)\n if \"utilization\" not in data_by_rank:\n warnings.warn(\"No GPU utilization information available in {}\".format(data_by_rank))\n continue\n util_report = data_by_rank[\"utilization\"]\n if not (\"gpu_util\" in util_report):\n warnings.warn(\n \"GPU utilization information does not provide 'gpu_util' information in {}\".format(util_report)\n )\n continue\n try:\n engine.state.metrics[util_name] = int(util_report[\"gpu_util\"])\n except ValueError:\n # Do not set GPU utilization information\n pass\n\n def attach(self, engine, name=\"gpu\", event_name=Events.ITERATION_COMPLETED):\n engine.add_event_handler(event_name, self.completed, name)\n", "path": "ignite/contrib/metrics/gpu_info.py"}]} | 1,491 | 312 |
gh_patches_debug_30950 | rasdani/github-patches | git_diff | StackStorm__st2-2508 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
linux.wait_for_ssh action improvements
Currently `linux.wait_for_ssh` action (https://github.com/StackStorm/st2/blob/master/contrib/linux/actions/wait_for_ssh.yaml) only supports authenticating with RSA SSH key.
There are multiple ways we can improve this action and make it more generic.
- Support for other key types
- Support for password based authentication
- Refactor the action to utilize the new Paramiko SSH client
- Default to using system user (stanley) username and key file. Ideally key file location should be read from the config
--- END ISSUE ---
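A rough sketch of the refactored action is below. It is illustrative only; it assumes the in-tree `ParamikoSSHClient` accepts `hostname`, `port`, `username`, `password`, `key_files` and `timeout`, and that the system user's key file is available via `cfg.CONF.system_user.ssh_key_file`.
```python
# Sketch: accept either a password or a key file, defaulting to the system
# user's key file when neither is given.
import time

from oslo_config import cfg
from st2actions.runners.ssh.paramiko_ssh import ParamikoSSHClient


def wait_for_ssh(hostname, port, username, password=None, keyfile=None,
                 ssh_timeout=5, sleep_delay=20, retries=10):
    if not password and not keyfile:
        keyfile = cfg.CONF.system_user.ssh_key_file

    client = ParamikoSSHClient(hostname=hostname, port=port, username=username,
                               password=password, key_files=keyfile,
                               timeout=ssh_timeout)
    for attempt in range(1, retries + 1):
        try:
            client.connect()
            return True
        except Exception:
            time.sleep(sleep_delay)
    raise Exception('Exceeded max retries (%s)' % (retries,))
```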
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `contrib/linux/actions/wait_for_ssh.py`
Content:
```
1 #!/usr/bin/env python
2
3 import time
4
5 import paramiko
6
7 from st2actions.runners.pythonrunner import Action
8
9
10 class BaseAction(Action):
11 def run(self, keyfile, username, hostname, ssh_timeout, retries):
12 key = paramiko.RSAKey.from_private_key_file(keyfile)
13 client = paramiko.SSHClient()
14 client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
15
16 for index in range(retries):
17 attempt = index + 1
18
19 try:
20 self.logger.debug('SSH connection attempt: %s' % (attempt))
21 client.connect(hostname=hostname, username=username, pkey=key)
22 return True
23 except Exception as e:
24 self.logger.info('Attempt %s failed (%s), sleeping...' % (attempt, str(e)))
25 time.sleep(ssh_timeout)
26
27 raise Exception('Exceeded max retries (%s)' % (retries))
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/contrib/linux/actions/wait_for_ssh.py b/contrib/linux/actions/wait_for_ssh.py
--- a/contrib/linux/actions/wait_for_ssh.py
+++ b/contrib/linux/actions/wait_for_ssh.py
@@ -2,26 +2,36 @@
import time
-import paramiko
+from oslo_config import cfg
from st2actions.runners.pythonrunner import Action
+from st2actions.runners.ssh.paramiko_ssh import ParamikoSSHClient
class BaseAction(Action):
- def run(self, keyfile, username, hostname, ssh_timeout, retries):
- key = paramiko.RSAKey.from_private_key_file(keyfile)
- client = paramiko.SSHClient()
- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ def run(self, hostname, port, username, password=None, keyfile=None, ssh_timeout=5,
+ sleep_delay=20, retries=10):
+ # Note: If neither password nor key file is provided, we try to use system user
+ # key file
+ if not password and not keyfile:
+ keyfile = cfg.CONF.system_user.ssh_key_file
+ self.logger.info('Neither "password" nor "keyfile" parameter provided, '
+ 'defaulting to using "%s" key file' % (keyfile))
+
+ client = ParamikoSSHClient(hostname=hostname, port=port, username=username,
+ password=password, key_files=keyfile,
+ timeout=ssh_timeout)
for index in range(retries):
attempt = index + 1
try:
self.logger.debug('SSH connection attempt: %s' % (attempt))
- client.connect(hostname=hostname, username=username, pkey=key)
+ client.connect()
return True
except Exception as e:
- self.logger.info('Attempt %s failed (%s), sleeping...' % (attempt, str(e)))
- time.sleep(ssh_timeout)
+ self.logger.info('Attempt %s failed (%s), sleeping for %s seconds...' %
+ (attempt, str(e), sleep_delay))
+ time.sleep(sleep_delay)
raise Exception('Exceeded max retries (%s)' % (retries))
| {"golden_diff": "diff --git a/contrib/linux/actions/wait_for_ssh.py b/contrib/linux/actions/wait_for_ssh.py\n--- a/contrib/linux/actions/wait_for_ssh.py\n+++ b/contrib/linux/actions/wait_for_ssh.py\n@@ -2,26 +2,36 @@\n \n import time\n \n-import paramiko\n+from oslo_config import cfg\n \n from st2actions.runners.pythonrunner import Action\n+from st2actions.runners.ssh.paramiko_ssh import ParamikoSSHClient\n \n \n class BaseAction(Action):\n- def run(self, keyfile, username, hostname, ssh_timeout, retries):\n- key = paramiko.RSAKey.from_private_key_file(keyfile)\n- client = paramiko.SSHClient()\n- client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n+ def run(self, hostname, port, username, password=None, keyfile=None, ssh_timeout=5,\n+ sleep_delay=20, retries=10):\n+ # Note: If neither password nor key file is provided, we try to use system user\n+ # key file\n+ if not password and not keyfile:\n+ keyfile = cfg.CONF.system_user.ssh_key_file\n+ self.logger.info('Neither \"password\" nor \"keyfile\" parameter provided, '\n+ 'defaulting to using \"%s\" key file' % (keyfile))\n+\n+ client = ParamikoSSHClient(hostname=hostname, port=port, username=username,\n+ password=password, key_files=keyfile,\n+ timeout=ssh_timeout)\n \n for index in range(retries):\n attempt = index + 1\n \n try:\n self.logger.debug('SSH connection attempt: %s' % (attempt))\n- client.connect(hostname=hostname, username=username, pkey=key)\n+ client.connect()\n return True\n except Exception as e:\n- self.logger.info('Attempt %s failed (%s), sleeping...' % (attempt, str(e)))\n- time.sleep(ssh_timeout)\n+ self.logger.info('Attempt %s failed (%s), sleeping for %s seconds...' %\n+ (attempt, str(e), sleep_delay))\n+ time.sleep(sleep_delay)\n \n raise Exception('Exceeded max retries (%s)' % (retries))\n", "issue": "linux.wait_for_ssh action improvements\nCurrently `linux.wait_for_ssh` action (https://github.com/StackStorm/st2/blob/master/contrib/linux/actions/wait_for_ssh.yaml) only supports authenticating with RSA SSH key.\n\nThere are multiple ways we can improve this action and make it more generic.\n- Support for other key types\n- Support for password based authentication\n- Refactor the action to utilize the new Paramiko SSH client\n- Default to using system user (stanley) username and key file. Ideally key file location should be read from the config\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport time\n\nimport paramiko\n\nfrom st2actions.runners.pythonrunner import Action\n\n\nclass BaseAction(Action):\n def run(self, keyfile, username, hostname, ssh_timeout, retries):\n key = paramiko.RSAKey.from_private_key_file(keyfile)\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n for index in range(retries):\n attempt = index + 1\n\n try:\n self.logger.debug('SSH connection attempt: %s' % (attempt))\n client.connect(hostname=hostname, username=username, pkey=key)\n return True\n except Exception as e:\n self.logger.info('Attempt %s failed (%s), sleeping...' 
% (attempt, str(e)))\n time.sleep(ssh_timeout)\n\n raise Exception('Exceeded max retries (%s)' % (retries))\n", "path": "contrib/linux/actions/wait_for_ssh.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport time\n\nfrom oslo_config import cfg\n\nfrom st2actions.runners.pythonrunner import Action\nfrom st2actions.runners.ssh.paramiko_ssh import ParamikoSSHClient\n\n\nclass BaseAction(Action):\n def run(self, hostname, port, username, password=None, keyfile=None, ssh_timeout=5,\n sleep_delay=20, retries=10):\n # Note: If neither password nor key file is provided, we try to use system user\n # key file\n if not password and not keyfile:\n keyfile = cfg.CONF.system_user.ssh_key_file\n self.logger.info('Neither \"password\" nor \"keyfile\" parameter provided, '\n 'defaulting to using \"%s\" key file' % (keyfile))\n\n client = ParamikoSSHClient(hostname=hostname, port=port, username=username,\n password=password, key_files=keyfile,\n timeout=ssh_timeout)\n\n for index in range(retries):\n attempt = index + 1\n\n try:\n self.logger.debug('SSH connection attempt: %s' % (attempt))\n client.connect()\n return True\n except Exception as e:\n self.logger.info('Attempt %s failed (%s), sleeping for %s seconds...' %\n (attempt, str(e), sleep_delay))\n time.sleep(sleep_delay)\n\n raise Exception('Exceeded max retries (%s)' % (retries))\n", "path": "contrib/linux/actions/wait_for_ssh.py"}]} | 616 | 477 |
gh_patches_debug_31670 | rasdani/github-patches | git_diff | sanic-org__sanic-2578 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Certificates not created with `mkcert` or `trustme` raise a RuntimeError
The `CertLoader` class in `sanic-org/sanic/sanic/worker/loader.py` checks the creator of the certificate. If the creator is not `mkcert` or `trustme`, it raises a `RuntimeError`. This will prevent Sanic from running with certificates from any other source.
--- END ISSUE ---
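A sketch of a more permissive `load()` is shown below. It assumes the loader keeps the raw `ssl_data` around and only resolves `_creator_class` when a known creator is named, so certificates from other sources fall back to a normal SSL context (e.g. via `process_to_context`) instead of raising:
```python
# Sketch: only require mkcert/trustme when a creator is actually recorded;
# otherwise build a plain SSL context from the supplied cert/key data.
def load(self, app):
    if not self._creator_class:
        return process_to_context(self._ssl_data)
    creator = self._creator_class(app, self._key, self._cert)
    return creator.generate_cert(self._localhost)
```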
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sanic/worker/loader.py`
Content:
```
1 from __future__ import annotations
2
3 import os
4 import sys
5
6 from importlib import import_module
7 from pathlib import Path
8 from typing import (
9 TYPE_CHECKING,
10 Any,
11 Callable,
12 Dict,
13 Optional,
14 Type,
15 Union,
16 cast,
17 )
18
19 from sanic.http.tls.creators import CertCreator, MkcertCreator, TrustmeCreator
20
21
22 if TYPE_CHECKING:
23 from sanic import Sanic as SanicApp
24
25
26 class AppLoader:
27 def __init__(
28 self,
29 module_input: str = "",
30 as_factory: bool = False,
31 as_simple: bool = False,
32 args: Any = None,
33 factory: Optional[Callable[[], SanicApp]] = None,
34 ) -> None:
35 self.module_input = module_input
36 self.module_name = ""
37 self.app_name = ""
38 self.as_factory = as_factory
39 self.as_simple = as_simple
40 self.args = args
41 self.factory = factory
42 self.cwd = os.getcwd()
43
44 if module_input:
45 delimiter = ":" if ":" in module_input else "."
46 if module_input.count(delimiter):
47 module_name, app_name = module_input.rsplit(delimiter, 1)
48 self.module_name = module_name
49 self.app_name = app_name
50 if self.app_name.endswith("()"):
51 self.as_factory = True
52 self.app_name = self.app_name[:-2]
53
54 def load(self) -> SanicApp:
55 module_path = os.path.abspath(self.cwd)
56 if module_path not in sys.path:
57 sys.path.append(module_path)
58
59 if self.factory:
60 return self.factory()
61 else:
62 from sanic.app import Sanic
63 from sanic.simple import create_simple_server
64
65 if self.as_simple:
66 path = Path(self.module_input)
67 app = create_simple_server(path)
68 else:
69 if self.module_name == "" and os.path.isdir(self.module_input):
70 raise ValueError(
71 "App not found.\n"
72 " Please use --simple if you are passing a "
73 "directory to sanic.\n"
74 f" eg. sanic {self.module_input} --simple"
75 )
76
77 module = import_module(self.module_name)
78 app = getattr(module, self.app_name, None)
79 if self.as_factory:
80 try:
81 app = app(self.args)
82 except TypeError:
83 app = app()
84
85 app_type_name = type(app).__name__
86
87 if (
88 not isinstance(app, Sanic)
89 and self.args
90 and hasattr(self.args, "module")
91 ):
92 if callable(app):
93 solution = f"sanic {self.args.module} --factory"
94 raise ValueError(
95 "Module is not a Sanic app, it is a "
96 f"{app_type_name}\n"
97 " If this callable returns a "
98 f"Sanic instance try: \n{solution}"
99 )
100
101 raise ValueError(
102 f"Module is not a Sanic app, it is a {app_type_name}\n"
103 f" Perhaps you meant {self.args.module}:app?"
104 )
105 return app
106
107
108 class CertLoader:
109 _creator_class: Type[CertCreator]
110
111 def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):
112 creator_name = ssl_data.get("creator")
113 if creator_name not in ("mkcert", "trustme"):
114 raise RuntimeError(f"Unknown certificate creator: {creator_name}")
115 elif creator_name == "mkcert":
116 self._creator_class = MkcertCreator
117 elif creator_name == "trustme":
118 self._creator_class = TrustmeCreator
119
120 self._key = ssl_data["key"]
121 self._cert = ssl_data["cert"]
122 self._localhost = cast(str, ssl_data["localhost"])
123
124 def load(self, app: SanicApp):
125 creator = self._creator_class(app, self._key, self._cert)
126 return creator.generate_cert(self._localhost)
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sanic/worker/loader.py b/sanic/worker/loader.py
--- a/sanic/worker/loader.py
+++ b/sanic/worker/loader.py
@@ -5,18 +5,10 @@
from importlib import import_module
from pathlib import Path
-from typing import (
- TYPE_CHECKING,
- Any,
- Callable,
- Dict,
- Optional,
- Type,
- Union,
- cast,
-)
+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union, cast
-from sanic.http.tls.creators import CertCreator, MkcertCreator, TrustmeCreator
+from sanic.http.tls.context import process_to_context
+from sanic.http.tls.creators import MkcertCreator, TrustmeCreator
if TYPE_CHECKING:
@@ -106,21 +98,30 @@
class CertLoader:
- _creator_class: Type[CertCreator]
+ _creators = {
+ "mkcert": MkcertCreator,
+ "trustme": TrustmeCreator,
+ }
def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):
- creator_name = ssl_data.get("creator")
- if creator_name not in ("mkcert", "trustme"):
+ self._ssl_data = ssl_data
+
+ creator_name = cast(str, ssl_data.get("creator"))
+
+ self._creator_class = self._creators.get(creator_name)
+ if not creator_name:
+ return
+
+ if not self._creator_class:
raise RuntimeError(f"Unknown certificate creator: {creator_name}")
- elif creator_name == "mkcert":
- self._creator_class = MkcertCreator
- elif creator_name == "trustme":
- self._creator_class = TrustmeCreator
self._key = ssl_data["key"]
self._cert = ssl_data["cert"]
self._localhost = cast(str, ssl_data["localhost"])
def load(self, app: SanicApp):
+ if not self._creator_class:
+ return process_to_context(self._ssl_data)
+
creator = self._creator_class(app, self._key, self._cert)
return creator.generate_cert(self._localhost)
| {"golden_diff": "diff --git a/sanic/worker/loader.py b/sanic/worker/loader.py\n--- a/sanic/worker/loader.py\n+++ b/sanic/worker/loader.py\n@@ -5,18 +5,10 @@\n \n from importlib import import_module\n from pathlib import Path\n-from typing import (\n- TYPE_CHECKING,\n- Any,\n- Callable,\n- Dict,\n- Optional,\n- Type,\n- Union,\n- cast,\n-)\n+from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union, cast\n \n-from sanic.http.tls.creators import CertCreator, MkcertCreator, TrustmeCreator\n+from sanic.http.tls.context import process_to_context\n+from sanic.http.tls.creators import MkcertCreator, TrustmeCreator\n \n \n if TYPE_CHECKING:\n@@ -106,21 +98,30 @@\n \n \n class CertLoader:\n- _creator_class: Type[CertCreator]\n+ _creators = {\n+ \"mkcert\": MkcertCreator,\n+ \"trustme\": TrustmeCreator,\n+ }\n \n def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):\n- creator_name = ssl_data.get(\"creator\")\n- if creator_name not in (\"mkcert\", \"trustme\"):\n+ self._ssl_data = ssl_data\n+\n+ creator_name = cast(str, ssl_data.get(\"creator\"))\n+\n+ self._creator_class = self._creators.get(creator_name)\n+ if not creator_name:\n+ return\n+\n+ if not self._creator_class:\n raise RuntimeError(f\"Unknown certificate creator: {creator_name}\")\n- elif creator_name == \"mkcert\":\n- self._creator_class = MkcertCreator\n- elif creator_name == \"trustme\":\n- self._creator_class = TrustmeCreator\n \n self._key = ssl_data[\"key\"]\n self._cert = ssl_data[\"cert\"]\n self._localhost = cast(str, ssl_data[\"localhost\"])\n \n def load(self, app: SanicApp):\n+ if not self._creator_class:\n+ return process_to_context(self._ssl_data)\n+\n creator = self._creator_class(app, self._key, self._cert)\n return creator.generate_cert(self._localhost)\n", "issue": "Certificates not created with `mkcert` or `trustme` raise a RuntimeError\nThe `CertLoader` class in `sanic-org/sanic/sanic/worker/loader.py` checks the creator of the certificate. If the creator is not `mkcert` or `trustme` then it raises a `RuntimeError`. 
This will prevent Sanic from running with certificates from any other sources.\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\n\nfrom importlib import import_module\nfrom pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Optional,\n Type,\n Union,\n cast,\n)\n\nfrom sanic.http.tls.creators import CertCreator, MkcertCreator, TrustmeCreator\n\n\nif TYPE_CHECKING:\n from sanic import Sanic as SanicApp\n\n\nclass AppLoader:\n def __init__(\n self,\n module_input: str = \"\",\n as_factory: bool = False,\n as_simple: bool = False,\n args: Any = None,\n factory: Optional[Callable[[], SanicApp]] = None,\n ) -> None:\n self.module_input = module_input\n self.module_name = \"\"\n self.app_name = \"\"\n self.as_factory = as_factory\n self.as_simple = as_simple\n self.args = args\n self.factory = factory\n self.cwd = os.getcwd()\n\n if module_input:\n delimiter = \":\" if \":\" in module_input else \".\"\n if module_input.count(delimiter):\n module_name, app_name = module_input.rsplit(delimiter, 1)\n self.module_name = module_name\n self.app_name = app_name\n if self.app_name.endswith(\"()\"):\n self.as_factory = True\n self.app_name = self.app_name[:-2]\n\n def load(self) -> SanicApp:\n module_path = os.path.abspath(self.cwd)\n if module_path not in sys.path:\n sys.path.append(module_path)\n\n if self.factory:\n return self.factory()\n else:\n from sanic.app import Sanic\n from sanic.simple import create_simple_server\n\n if self.as_simple:\n path = Path(self.module_input)\n app = create_simple_server(path)\n else:\n if self.module_name == \"\" and os.path.isdir(self.module_input):\n raise ValueError(\n \"App not found.\\n\"\n \" Please use --simple if you are passing a \"\n \"directory to sanic.\\n\"\n f\" eg. 
sanic {self.module_input} --simple\"\n )\n\n module = import_module(self.module_name)\n app = getattr(module, self.app_name, None)\n if self.as_factory:\n try:\n app = app(self.args)\n except TypeError:\n app = app()\n\n app_type_name = type(app).__name__\n\n if (\n not isinstance(app, Sanic)\n and self.args\n and hasattr(self.args, \"module\")\n ):\n if callable(app):\n solution = f\"sanic {self.args.module} --factory\"\n raise ValueError(\n \"Module is not a Sanic app, it is a \"\n f\"{app_type_name}\\n\"\n \" If this callable returns a \"\n f\"Sanic instance try: \\n{solution}\"\n )\n\n raise ValueError(\n f\"Module is not a Sanic app, it is a {app_type_name}\\n\"\n f\" Perhaps you meant {self.args.module}:app?\"\n )\n return app\n\n\nclass CertLoader:\n _creator_class: Type[CertCreator]\n\n def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):\n creator_name = ssl_data.get(\"creator\")\n if creator_name not in (\"mkcert\", \"trustme\"):\n raise RuntimeError(f\"Unknown certificate creator: {creator_name}\")\n elif creator_name == \"mkcert\":\n self._creator_class = MkcertCreator\n elif creator_name == \"trustme\":\n self._creator_class = TrustmeCreator\n\n self._key = ssl_data[\"key\"]\n self._cert = ssl_data[\"cert\"]\n self._localhost = cast(str, ssl_data[\"localhost\"])\n\n def load(self, app: SanicApp):\n creator = self._creator_class(app, self._key, self._cert)\n return creator.generate_cert(self._localhost)\n", "path": "sanic/worker/loader.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport os\nimport sys\n\nfrom importlib import import_module\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union, cast\n\nfrom sanic.http.tls.context import process_to_context\nfrom sanic.http.tls.creators import MkcertCreator, TrustmeCreator\n\n\nif TYPE_CHECKING:\n from sanic import Sanic as SanicApp\n\n\nclass AppLoader:\n def __init__(\n self,\n module_input: str = \"\",\n as_factory: bool = False,\n as_simple: bool = False,\n args: Any = None,\n factory: Optional[Callable[[], SanicApp]] = None,\n ) -> None:\n self.module_input = module_input\n self.module_name = \"\"\n self.app_name = \"\"\n self.as_factory = as_factory\n self.as_simple = as_simple\n self.args = args\n self.factory = factory\n self.cwd = os.getcwd()\n\n if module_input:\n delimiter = \":\" if \":\" in module_input else \".\"\n if module_input.count(delimiter):\n module_name, app_name = module_input.rsplit(delimiter, 1)\n self.module_name = module_name\n self.app_name = app_name\n if self.app_name.endswith(\"()\"):\n self.as_factory = True\n self.app_name = self.app_name[:-2]\n\n def load(self) -> SanicApp:\n module_path = os.path.abspath(self.cwd)\n if module_path not in sys.path:\n sys.path.append(module_path)\n\n if self.factory:\n return self.factory()\n else:\n from sanic.app import Sanic\n from sanic.simple import create_simple_server\n\n if self.as_simple:\n path = Path(self.module_input)\n app = create_simple_server(path)\n else:\n if self.module_name == \"\" and os.path.isdir(self.module_input):\n raise ValueError(\n \"App not found.\\n\"\n \" Please use --simple if you are passing a \"\n \"directory to sanic.\\n\"\n f\" eg. 
sanic {self.module_input} --simple\"\n )\n\n module = import_module(self.module_name)\n app = getattr(module, self.app_name, None)\n if self.as_factory:\n try:\n app = app(self.args)\n except TypeError:\n app = app()\n\n app_type_name = type(app).__name__\n\n if (\n not isinstance(app, Sanic)\n and self.args\n and hasattr(self.args, \"module\")\n ):\n if callable(app):\n solution = f\"sanic {self.args.module} --factory\"\n raise ValueError(\n \"Module is not a Sanic app, it is a \"\n f\"{app_type_name}\\n\"\n \" If this callable returns a \"\n f\"Sanic instance try: \\n{solution}\"\n )\n\n raise ValueError(\n f\"Module is not a Sanic app, it is a {app_type_name}\\n\"\n f\" Perhaps you meant {self.args.module}:app?\"\n )\n return app\n\n\nclass CertLoader:\n _creators = {\n \"mkcert\": MkcertCreator,\n \"trustme\": TrustmeCreator,\n }\n\n def __init__(self, ssl_data: Dict[str, Union[str, os.PathLike]]):\n self._ssl_data = ssl_data\n\n creator_name = cast(str, ssl_data.get(\"creator\"))\n\n self._creator_class = self._creators.get(creator_name)\n if not creator_name:\n return\n\n if not self._creator_class:\n raise RuntimeError(f\"Unknown certificate creator: {creator_name}\")\n\n self._key = ssl_data[\"key\"]\n self._cert = ssl_data[\"cert\"]\n self._localhost = cast(str, ssl_data[\"localhost\"])\n\n def load(self, app: SanicApp):\n if not self._creator_class:\n return process_to_context(self._ssl_data)\n\n creator = self._creator_class(app, self._key, self._cert)\n return creator.generate_cert(self._localhost)\n", "path": "sanic/worker/loader.py"}]} | 1,469 | 499 |
gh_patches_debug_19003 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5302 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_OPENAPI_20 incorrectly flags API keys via HTTPS
**Describe the issue**
#5253 added CKV_OPENAPI_20 with the message "Ensure that API keys are not sent over cleartext", but the [check](https://github.com/bridgecrewio/checkov/blob/main/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py) does not check the API's supported schemes.
If the intent of this check is to prevent cleartext use of API keys, then if the root level [`schemes`](https://swagger.io/docs/specification/2-0/api-host-and-base-path/) key in OpenAPI 2.0 or [`servers`](https://swagger.io/docs/specification/api-host-and-base-path/) key in OpenAPI 3.0 specifies only `https` (2.0) or only `url`s which are HTTPS (3.0), this check should pass.
**Examples**
[fail2.json](https://github.com/bridgecrewio/checkov/blob/main/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail2.json) and its YAML counterpart should fail, but if they specified `"schemes": ["https"]`, they should not.
Ditto for the OpenAPI 3.0 version of this example.
**Version (please complete the following information):**
2.3.312
**Additional context**
It may be that the message is wrong and you actually intend to flag all use of API keys, but if that's the case, the message should convey that. I also would argue that header API keys should not be scrutinized the same way as query parameter API keys, since the risk of leaking the API key unintentionally is higher with the latter.
--- END ISSUE ---
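As a rough illustration of the report above (this fragment is not from the issue; names and values are made up), a spec that only exposes HTTPS while still using an API key is the case that should start passing:
```
# Hypothetical OpenAPI 2.0 document, written as a plain Python dict: it uses an
# apiKey security scheme but advertises HTTPS only, so the key never travels in
# cleartext and CKV_OPENAPI_20 should not flag it.
spec = {
    "swagger": "2.0",
    "schemes": ["https"],
    "securityDefinitions": {
        "api_key": {"type": "apiKey", "name": "X-Api-Key", "in": "header"},
    },
    "paths": {
        "/items": {"get": {"security": [{"api_key": []}]}},
    },
}


def allows_cleartext(conf):
    # Sketch of the guard the issue asks for: only keep flagging specs that can
    # serve plain HTTP (OpenAPI 2.0 "schemes") or that list an http:// server
    # (OpenAPI 3.0 "servers").
    schemes = conf.get("schemes") or []
    servers = conf.get("servers") or []
    return "http" in schemes or any(
        server.get("url", "").startswith("http://") for server in servers
    )


assert not allows_cleartext(spec)                         # https only: no finding expected
assert allows_cleartext({"schemes": ["http", "https"]})   # plain http allowed: still flag
```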
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/openapi/checks/resource/generic/ClearTextAPIKey.py`
Content:
```
1 from __future__ import annotations
2
3 from typing import Any
4 from checkov.common.models.enums import CheckResult, CheckCategories
5 from checkov.common.checks.enums import BlockType
6 from checkov.common.util.consts import LINE_FIELD_NAMES
7 from checkov.openapi.checks.base_openapi_check import BaseOpenapiCheck
8
9
10 class ClearTestAPIKey(BaseOpenapiCheck):
11 def __init__(self) -> None:
12 id = "CKV_OPENAPI_20"
13 name = "Ensure that API keys are not sent over cleartext"
14 categories = (CheckCategories.API_SECURITY,)
15 supported_resources = ('paths',)
16 super().__init__(name=name, id=id, categories=categories, supported_entities=supported_resources,
17 block_type=BlockType.DOCUMENT)
18
19 def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class
20 components = conf.get("components")
21 security_def = conf.get("securityDefinitions")
22 if components and isinstance(components, dict):
23 security_schemes = components.get("securitySchemes") or {}
24 elif security_def:
25 security_schemes = security_def
26 else:
27 return CheckResult.PASSED, conf
28
29 paths = conf.get('paths')
30 if not isinstance(paths, dict):
31 return CheckResult.PASSED, security_schemes
32
33 filtered_dict = {}
34 if isinstance(security_schemes, dict):
35 for name, scheme in security_schemes.items():
36 if isinstance(scheme, dict) and scheme.get('type') == "apiKey":
37 filtered_dict[name] = scheme
38
39 if not filtered_dict:
40 return CheckResult.PASSED, security_schemes
41
42 for key, path in paths.items():
43 if not path:
44 continue
45 if key in LINE_FIELD_NAMES:
46 continue
47 for value in path.values():
48 if not isinstance(value, dict):
49 continue
50 operation_security = value.get('security')
51 if operation_security and isinstance(operation_security, list):
52 for sec in operation_security[0]:
53 if sec in filtered_dict:
54 return CheckResult.FAILED, security_schemes
55
56 return CheckResult.PASSED, conf
57
58
59 check = ClearTestAPIKey()
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py
--- a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py
+++ b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py
@@ -17,6 +17,16 @@
block_type=BlockType.DOCUMENT)
def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class
+ schemes = conf.get("schemes")
+ if schemes and isinstance(schemes, list):
+ if "http" not in schemes and "wp" not in schemes:
+ return CheckResult.PASSED, conf
+
+ servers = conf.get("servers")
+ if servers and isinstance(servers, list):
+ if not any(server['url'].startswith('http://') for server in servers):
+ return CheckResult.PASSED, conf
+
components = conf.get("components")
security_def = conf.get("securityDefinitions")
if components and isinstance(components, dict):
| {"golden_diff": "diff --git a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py\n--- a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py\n+++ b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py\n@@ -17,6 +17,16 @@\n block_type=BlockType.DOCUMENT)\n \n def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class\n+ schemes = conf.get(\"schemes\")\n+ if schemes and isinstance(schemes, list):\n+ if \"http\" not in schemes and \"wp\" not in schemes:\n+ return CheckResult.PASSED, conf\n+\n+ servers = conf.get(\"servers\")\n+ if servers and isinstance(servers, list):\n+ if not any(server['url'].startswith('http://') for server in servers):\n+ return CheckResult.PASSED, conf\n+\n components = conf.get(\"components\")\n security_def = conf.get(\"securityDefinitions\")\n if components and isinstance(components, dict):\n", "issue": "CKV_OPENAPI_20 incorrectly flags API keys via HTTPS\n**Describe the issue**\r\n\r\n#5253 added CKV_OPENAPI_20 with the message \"Ensure that API keys are not sent over cleartext\", but the [check](https://github.com/bridgecrewio/checkov/blob/main/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py) does not check the API's supported schemes.\r\n\r\nIf the intent of this check is to prevent cleartext use of API keys, then if the root level [`schemes`](https://swagger.io/docs/specification/2-0/api-host-and-base-path/) key in OpenAPI 2.0 or [`servers`](https://swagger.io/docs/specification/api-host-and-base-path/) key in OpenAPI 3.0 specifies only `https` (2.0) or only `url`s which are HTTPS (3.0), this check should pass.\r\n\r\n**Examples**\r\n\r\n[fail2.json](https://github.com/bridgecrewio/checkov/blob/main/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail2.json) and its YAML counterpart should fail, but if they specified `\"schemes\": [\"https\"]`, they should not.\r\n\r\nDitto for the OpenAPI 3.0 version of this example.\r\n\r\n**Version (please complete the following information):**\r\n\r\n2.3.312\r\n\r\n**Additional context**\r\n\r\nIt may be that the message is wrong and you actually intend to flag all use of API keys, but if that's the case, the message should convey that. 
I also would argue that header API keys should not be scrutinized the same way as query parameter API keys, since the risk of leaking the API key unintentionally is higher with the latter.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.checks.enums import BlockType\nfrom checkov.common.util.consts import LINE_FIELD_NAMES\nfrom checkov.openapi.checks.base_openapi_check import BaseOpenapiCheck\n\n\nclass ClearTestAPIKey(BaseOpenapiCheck):\n def __init__(self) -> None:\n id = \"CKV_OPENAPI_20\"\n name = \"Ensure that API keys are not sent over cleartext\"\n categories = (CheckCategories.API_SECURITY,)\n supported_resources = ('paths',)\n super().__init__(name=name, id=id, categories=categories, supported_entities=supported_resources,\n block_type=BlockType.DOCUMENT)\n\n def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class\n components = conf.get(\"components\")\n security_def = conf.get(\"securityDefinitions\")\n if components and isinstance(components, dict):\n security_schemes = components.get(\"securitySchemes\") or {}\n elif security_def:\n security_schemes = security_def\n else:\n return CheckResult.PASSED, conf\n\n paths = conf.get('paths')\n if not isinstance(paths, dict):\n return CheckResult.PASSED, security_schemes\n\n filtered_dict = {}\n if isinstance(security_schemes, dict):\n for name, scheme in security_schemes.items():\n if isinstance(scheme, dict) and scheme.get('type') == \"apiKey\":\n filtered_dict[name] = scheme\n\n if not filtered_dict:\n return CheckResult.PASSED, security_schemes\n\n for key, path in paths.items():\n if not path:\n continue\n if key in LINE_FIELD_NAMES:\n continue\n for value in path.values():\n if not isinstance(value, dict):\n continue\n operation_security = value.get('security')\n if operation_security and isinstance(operation_security, list):\n for sec in operation_security[0]:\n if sec in filtered_dict:\n return CheckResult.FAILED, security_schemes\n\n return CheckResult.PASSED, conf\n\n\ncheck = ClearTestAPIKey()\n", "path": "checkov/openapi/checks/resource/generic/ClearTextAPIKey.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Any\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.checks.enums import BlockType\nfrom checkov.common.util.consts import LINE_FIELD_NAMES\nfrom checkov.openapi.checks.base_openapi_check import BaseOpenapiCheck\n\n\nclass ClearTestAPIKey(BaseOpenapiCheck):\n def __init__(self) -> None:\n id = \"CKV_OPENAPI_20\"\n name = \"Ensure that API keys are not sent over cleartext\"\n categories = (CheckCategories.API_SECURITY,)\n supported_resources = ('paths',)\n super().__init__(name=name, id=id, categories=categories, supported_entities=supported_resources,\n block_type=BlockType.DOCUMENT)\n\n def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class\n schemes = conf.get(\"schemes\")\n if schemes and isinstance(schemes, list):\n if \"http\" not in schemes and \"wp\" not in schemes:\n return CheckResult.PASSED, conf\n\n servers = conf.get(\"servers\")\n if servers and isinstance(servers, list):\n if not any(server['url'].startswith('http://') for server in servers):\n 
return CheckResult.PASSED, conf\n\n components = conf.get(\"components\")\n security_def = conf.get(\"securityDefinitions\")\n if components and isinstance(components, dict):\n security_schemes = components.get(\"securitySchemes\") or {}\n elif security_def:\n security_schemes = security_def\n else:\n return CheckResult.PASSED, conf\n\n paths = conf.get('paths')\n if not isinstance(paths, dict):\n return CheckResult.PASSED, security_schemes\n\n filtered_dict = {}\n if isinstance(security_schemes, dict):\n for name, scheme in security_schemes.items():\n if isinstance(scheme, dict) and scheme.get('type') == \"apiKey\":\n filtered_dict[name] = scheme\n\n if not filtered_dict:\n return CheckResult.PASSED, security_schemes\n\n for key, path in paths.items():\n if not path:\n continue\n if key in LINE_FIELD_NAMES:\n continue\n for value in path.values():\n if not isinstance(value, dict):\n continue\n operation_security = value.get('security')\n if operation_security and isinstance(operation_security, list):\n for sec in operation_security[0]:\n if sec in filtered_dict:\n return CheckResult.FAILED, security_schemes\n\n return CheckResult.PASSED, conf\n\n\ncheck = ClearTestAPIKey()\n", "path": "checkov/openapi/checks/resource/generic/ClearTextAPIKey.py"}]} | 1,242 | 273 |
gh_patches_debug_4711 | rasdani/github-patches | git_diff | scrapy__scrapy-4323 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mac OS X, OS X → macOS
We have a few references in the documentation where we use the old name of that OS. We should update them.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/extensions/memusage.py`
Content:
```
1 """
2 MemoryUsage extension
3
4 See documentation in docs/topics/extensions.rst
5 """
6 import sys
7 import socket
8 import logging
9 from pprint import pformat
10 from importlib import import_module
11
12 from twisted.internet import task
13
14 from scrapy import signals
15 from scrapy.exceptions import NotConfigured
16 from scrapy.mail import MailSender
17 from scrapy.utils.engine import get_engine_status
18
19 logger = logging.getLogger(__name__)
20
21
22 class MemoryUsage(object):
23
24 def __init__(self, crawler):
25 if not crawler.settings.getbool('MEMUSAGE_ENABLED'):
26 raise NotConfigured
27 try:
28 # stdlib's resource module is only available on unix platforms.
29 self.resource = import_module('resource')
30 except ImportError:
31 raise NotConfigured
32
33 self.crawler = crawler
34 self.warned = False
35 self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL')
36 self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB')*1024*1024
37 self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB')*1024*1024
38 self.check_interval = crawler.settings.getfloat('MEMUSAGE_CHECK_INTERVAL_SECONDS')
39 self.mail = MailSender.from_settings(crawler.settings)
40 crawler.signals.connect(self.engine_started, signal=signals.engine_started)
41 crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped)
42
43 @classmethod
44 def from_crawler(cls, crawler):
45 return cls(crawler)
46
47 def get_virtual_size(self):
48 size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss
49 if sys.platform != 'darwin':
50 # on Mac OS X ru_maxrss is in bytes, on Linux it is in KB
51 size *= 1024
52 return size
53
54 def engine_started(self):
55 self.crawler.stats.set_value('memusage/startup', self.get_virtual_size())
56 self.tasks = []
57 tsk = task.LoopingCall(self.update)
58 self.tasks.append(tsk)
59 tsk.start(self.check_interval, now=True)
60 if self.limit:
61 tsk = task.LoopingCall(self._check_limit)
62 self.tasks.append(tsk)
63 tsk.start(self.check_interval, now=True)
64 if self.warning:
65 tsk = task.LoopingCall(self._check_warning)
66 self.tasks.append(tsk)
67 tsk.start(self.check_interval, now=True)
68
69 def engine_stopped(self):
70 for tsk in self.tasks:
71 if tsk.running:
72 tsk.stop()
73
74 def update(self):
75 self.crawler.stats.max_value('memusage/max', self.get_virtual_size())
76
77 def _check_limit(self):
78 if self.get_virtual_size() > self.limit:
79 self.crawler.stats.set_value('memusage/limit_reached', 1)
80 mem = self.limit/1024/1024
81 logger.error("Memory usage exceeded %(memusage)dM. Shutting down Scrapy...",
82 {'memusage': mem}, extra={'crawler': self.crawler})
83 if self.notify_mails:
84 subj = "%s terminated: memory usage exceeded %dM at %s" % \
85 (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())
86 self._send_report(self.notify_mails, subj)
87 self.crawler.stats.set_value('memusage/limit_notified', 1)
88
89 open_spiders = self.crawler.engine.open_spiders
90 if open_spiders:
91 for spider in open_spiders:
92 self.crawler.engine.close_spider(spider, 'memusage_exceeded')
93 else:
94 self.crawler.stop()
95
96 def _check_warning(self):
97 if self.warned: # warn only once
98 return
99 if self.get_virtual_size() > self.warning:
100 self.crawler.stats.set_value('memusage/warning_reached', 1)
101 mem = self.warning/1024/1024
102 logger.warning("Memory usage reached %(memusage)dM",
103 {'memusage': mem}, extra={'crawler': self.crawler})
104 if self.notify_mails:
105 subj = "%s warning: memory usage reached %dM at %s" % \
106 (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())
107 self._send_report(self.notify_mails, subj)
108 self.crawler.stats.set_value('memusage/warning_notified', 1)
109 self.warned = True
110
111 def _send_report(self, rcpts, subject):
112 """send notification mail with some additional useful info"""
113 stats = self.crawler.stats
114 s = "Memory usage at engine startup : %dM\r\n" % (stats.get_value('memusage/startup')/1024/1024)
115 s += "Maximum memory usage : %dM\r\n" % (stats.get_value('memusage/max')/1024/1024)
116 s += "Current memory usage : %dM\r\n" % (self.get_virtual_size()/1024/1024)
117
118 s += "ENGINE STATUS ------------------------------------------------------- \r\n"
119 s += "\r\n"
120 s += pformat(get_engine_status(self.crawler.engine))
121 s += "\r\n"
122 self.mail.send(rcpts, subject, s)
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scrapy/extensions/memusage.py b/scrapy/extensions/memusage.py
--- a/scrapy/extensions/memusage.py
+++ b/scrapy/extensions/memusage.py
@@ -47,7 +47,7 @@
def get_virtual_size(self):
size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss
if sys.platform != 'darwin':
- # on Mac OS X ru_maxrss is in bytes, on Linux it is in KB
+ # on macOS ru_maxrss is in bytes, on Linux it is in KB
size *= 1024
return size
| {"golden_diff": "diff --git a/scrapy/extensions/memusage.py b/scrapy/extensions/memusage.py\n--- a/scrapy/extensions/memusage.py\n+++ b/scrapy/extensions/memusage.py\n@@ -47,7 +47,7 @@\n def get_virtual_size(self):\n size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss\n if sys.platform != 'darwin':\n- # on Mac OS X ru_maxrss is in bytes, on Linux it is in KB\n+ # on macOS ru_maxrss is in bytes, on Linux it is in KB\n size *= 1024\n return size\n", "issue": "Mac OS X, OS X \u2192 macOS\nWe have a few references in the documentation where we use the old name of that OS. We should update them.\n", "before_files": [{"content": "\"\"\"\nMemoryUsage extension\n\nSee documentation in docs/topics/extensions.rst\n\"\"\"\nimport sys\nimport socket\nimport logging\nfrom pprint import pformat\nfrom importlib import import_module\n\nfrom twisted.internet import task\n\nfrom scrapy import signals\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.mail import MailSender\nfrom scrapy.utils.engine import get_engine_status\n\nlogger = logging.getLogger(__name__)\n\n\nclass MemoryUsage(object):\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('MEMUSAGE_ENABLED'):\n raise NotConfigured\n try:\n # stdlib's resource module is only available on unix platforms.\n self.resource = import_module('resource')\n except ImportError:\n raise NotConfigured\n\n self.crawler = crawler\n self.warned = False\n self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL')\n self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB')*1024*1024\n self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB')*1024*1024\n self.check_interval = crawler.settings.getfloat('MEMUSAGE_CHECK_INTERVAL_SECONDS')\n self.mail = MailSender.from_settings(crawler.settings)\n crawler.signals.connect(self.engine_started, signal=signals.engine_started)\n crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped)\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def get_virtual_size(self):\n size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss\n if sys.platform != 'darwin':\n # on Mac OS X ru_maxrss is in bytes, on Linux it is in KB\n size *= 1024\n return size\n\n def engine_started(self):\n self.crawler.stats.set_value('memusage/startup', self.get_virtual_size())\n self.tasks = []\n tsk = task.LoopingCall(self.update)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.limit:\n tsk = task.LoopingCall(self._check_limit)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.warning:\n tsk = task.LoopingCall(self._check_warning)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n\n def engine_stopped(self):\n for tsk in self.tasks:\n if tsk.running:\n tsk.stop()\n\n def update(self):\n self.crawler.stats.max_value('memusage/max', self.get_virtual_size())\n\n def _check_limit(self):\n if self.get_virtual_size() > self.limit:\n self.crawler.stats.set_value('memusage/limit_reached', 1)\n mem = self.limit/1024/1024\n logger.error(\"Memory usage exceeded %(memusage)dM. 
Shutting down Scrapy...\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = \"%s terminated: memory usage exceeded %dM at %s\" % \\\n (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/limit_notified', 1)\n\n open_spiders = self.crawler.engine.open_spiders\n if open_spiders:\n for spider in open_spiders:\n self.crawler.engine.close_spider(spider, 'memusage_exceeded')\n else:\n self.crawler.stop()\n\n def _check_warning(self):\n if self.warned: # warn only once\n return\n if self.get_virtual_size() > self.warning:\n self.crawler.stats.set_value('memusage/warning_reached', 1)\n mem = self.warning/1024/1024\n logger.warning(\"Memory usage reached %(memusage)dM\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = \"%s warning: memory usage reached %dM at %s\" % \\\n (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/warning_notified', 1)\n self.warned = True\n\n def _send_report(self, rcpts, subject):\n \"\"\"send notification mail with some additional useful info\"\"\"\n stats = self.crawler.stats\n s = \"Memory usage at engine startup : %dM\\r\\n\" % (stats.get_value('memusage/startup')/1024/1024)\n s += \"Maximum memory usage : %dM\\r\\n\" % (stats.get_value('memusage/max')/1024/1024)\n s += \"Current memory usage : %dM\\r\\n\" % (self.get_virtual_size()/1024/1024)\n\n s += \"ENGINE STATUS ------------------------------------------------------- \\r\\n\"\n s += \"\\r\\n\"\n s += pformat(get_engine_status(self.crawler.engine))\n s += \"\\r\\n\"\n self.mail.send(rcpts, subject, s)\n", "path": "scrapy/extensions/memusage.py"}], "after_files": [{"content": "\"\"\"\nMemoryUsage extension\n\nSee documentation in docs/topics/extensions.rst\n\"\"\"\nimport sys\nimport socket\nimport logging\nfrom pprint import pformat\nfrom importlib import import_module\n\nfrom twisted.internet import task\n\nfrom scrapy import signals\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.mail import MailSender\nfrom scrapy.utils.engine import get_engine_status\n\nlogger = logging.getLogger(__name__)\n\n\nclass MemoryUsage(object):\n\n def __init__(self, crawler):\n if not crawler.settings.getbool('MEMUSAGE_ENABLED'):\n raise NotConfigured\n try:\n # stdlib's resource module is only available on unix platforms.\n self.resource = import_module('resource')\n except ImportError:\n raise NotConfigured\n\n self.crawler = crawler\n self.warned = False\n self.notify_mails = crawler.settings.getlist('MEMUSAGE_NOTIFY_MAIL')\n self.limit = crawler.settings.getint('MEMUSAGE_LIMIT_MB')*1024*1024\n self.warning = crawler.settings.getint('MEMUSAGE_WARNING_MB')*1024*1024\n self.check_interval = crawler.settings.getfloat('MEMUSAGE_CHECK_INTERVAL_SECONDS')\n self.mail = MailSender.from_settings(crawler.settings)\n crawler.signals.connect(self.engine_started, signal=signals.engine_started)\n crawler.signals.connect(self.engine_stopped, signal=signals.engine_stopped)\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler)\n\n def get_virtual_size(self):\n size = self.resource.getrusage(self.resource.RUSAGE_SELF).ru_maxrss\n if sys.platform != 'darwin':\n # on macOS ru_maxrss is in bytes, on Linux it is in KB\n size *= 1024\n return size\n\n def engine_started(self):\n self.crawler.stats.set_value('memusage/startup', self.get_virtual_size())\n 
self.tasks = []\n tsk = task.LoopingCall(self.update)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.limit:\n tsk = task.LoopingCall(self._check_limit)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n if self.warning:\n tsk = task.LoopingCall(self._check_warning)\n self.tasks.append(tsk)\n tsk.start(self.check_interval, now=True)\n\n def engine_stopped(self):\n for tsk in self.tasks:\n if tsk.running:\n tsk.stop()\n\n def update(self):\n self.crawler.stats.max_value('memusage/max', self.get_virtual_size())\n\n def _check_limit(self):\n if self.get_virtual_size() > self.limit:\n self.crawler.stats.set_value('memusage/limit_reached', 1)\n mem = self.limit/1024/1024\n logger.error(\"Memory usage exceeded %(memusage)dM. Shutting down Scrapy...\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = \"%s terminated: memory usage exceeded %dM at %s\" % \\\n (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/limit_notified', 1)\n\n open_spiders = self.crawler.engine.open_spiders\n if open_spiders:\n for spider in open_spiders:\n self.crawler.engine.close_spider(spider, 'memusage_exceeded')\n else:\n self.crawler.stop()\n\n def _check_warning(self):\n if self.warned: # warn only once\n return\n if self.get_virtual_size() > self.warning:\n self.crawler.stats.set_value('memusage/warning_reached', 1)\n mem = self.warning/1024/1024\n logger.warning(\"Memory usage reached %(memusage)dM\",\n {'memusage': mem}, extra={'crawler': self.crawler})\n if self.notify_mails:\n subj = \"%s warning: memory usage reached %dM at %s\" % \\\n (self.crawler.settings['BOT_NAME'], mem, socket.gethostname())\n self._send_report(self.notify_mails, subj)\n self.crawler.stats.set_value('memusage/warning_notified', 1)\n self.warned = True\n\n def _send_report(self, rcpts, subject):\n \"\"\"send notification mail with some additional useful info\"\"\"\n stats = self.crawler.stats\n s = \"Memory usage at engine startup : %dM\\r\\n\" % (stats.get_value('memusage/startup')/1024/1024)\n s += \"Maximum memory usage : %dM\\r\\n\" % (stats.get_value('memusage/max')/1024/1024)\n s += \"Current memory usage : %dM\\r\\n\" % (self.get_virtual_size()/1024/1024)\n\n s += \"ENGINE STATUS ------------------------------------------------------- \\r\\n\"\n s += \"\\r\\n\"\n s += pformat(get_engine_status(self.crawler.engine))\n s += \"\\r\\n\"\n self.mail.send(rcpts, subject, s)\n", "path": "scrapy/extensions/memusage.py"}]} | 1,691 | 138 |
gh_patches_debug_20026 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-1063 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[niconico_dmc] Heartbeat failed with SOCKS5 `HTTPS?_PROXY` env
## Checklist
- [x] I'm reporting a broken site support
- [x] I've verified that I'm running yt-dlp version **2021.09.02**
- [x] I've checked that all provided URLs are alive and playable in a browser
- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped
- [x] I've searched the bugtracker for similar issues including closed ones
## Verbose log
```
$ HTTP_PROXY=socks5://127.0.0.1:10080 HTTPS_PROXY=socks5://127.0.0.1:10080 yt-dlp --verbose --proxy socks5://127.0.0.1:10080 https://www.nicovideo.jp/watch/sm9
[debug] Command-line config: ['--verbose', '--proxy', 'socks5://127.0.0.1:10080', 'https://www.nicovideo.jp/watch/sm9']
[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, pref UTF-8
[debug] yt-dlp version 2021.09.02 (source)
[debug] Python version 3.10.0rc2 (CPython 32bit) - Linux-3.10.49-armv7l-with-libc
[debug] exe versions: ffmpeg 4.4, ffprobe 4.4
[debug] Optional libraries: mutagen, pycryptodome, sqlite, websockets
[debug] Proxy map: {'http': 'socks5://127.0.0.1:10080', 'https': 'socks5://127.0.0.1:10080'}
[debug] [niconico] Extracting URL: https://www.nicovideo.jp/watch/sm9
[niconico] sm9: Downloading webpage
[niconico] sm9: Downloading video info page
[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, vcodec:vp9.2(10), acodec, filesize, fs_approx, tbr, vbr, abr, asr, proto, vext, aext, hasaud, source, id
[debug] Default format spec: bestvideo*+bestaudio/best
[info] sm9: Downloading 1 format(s): h264_360p-aac_128kbps
[debug] Invoking downloader on "niconico_dmc:sm9/archive_h264_360p/archive_aac_128kbps"
[niconico_dmc] Downloading from DMC
[niconico] sm9: Downloading JSON metadata for h264_360p-aac_128kbps
[niconico] sm9: Acquiring permission for downloading video
[niconico_dmc] Heartbeat with 40 second interval ...
[niconico_dmc] Heartbeat failed
[download] Destination: 新・豪血寺一族 -煩悩解放 - レッツゴー!陰陽師 [sm9].mp4
[download] 25.4% of 26.95MiB at 174.88KiB/s ETA 01:57[niconico_dmc] Heartbeat failed
[download] 39.8% of 26.95MiB at 137.12KiB/s ETA 02:01[niconico_dmc] Heartbeat failed
[download] 53.6% of 26.95MiB at 124.49KiB/s ETA 01:42[download] Got server HTTP error: Downloaded 15136650 bytes, expected 28261021 bytes. Retrying (attempt 1 of 10) ...
```
## Description
There seem to be two separate issues:
- niconico_dmc heartbeat connection does not respect the `--proxy` option, and
- it cannot connect via a SOCKS5 server.
--- END ISSUE ---
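A minimal sketch of the distinction the report points at (not part of the issue; simplified from the downloader): the heartbeat request has to go through the opener that yt-dlp itself configures, otherwise `--proxy` and SOCKS proxy URLs are ignored:
```
from yt_dlp.utils import sanitized_Request


def send_heartbeat(ydl, heartbeat_url, heartbeat_data):
    # compat_urllib_request.urlopen(heartbeat_url, heartbeat_data) builds its
    # opener from the process environment only, so --proxy is never applied and
    # socks5:// proxy URLs are not understood -- hence the "Heartbeat failed"
    # lines in the log above.
    # Routing the request through the YoutubeDL instance instead reuses the
    # same opener (proxy handlers included) as every other request it makes.
    request = sanitized_Request(heartbeat_url, heartbeat_data)
    return ydl.urlopen(request).read()
```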
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/downloader/niconico.py`
Content:
```
1 # coding: utf-8
2 from __future__ import unicode_literals
3
4 import threading
5
6 from .common import FileDownloader
7 from ..downloader import get_suitable_downloader
8 from ..extractor.niconico import NiconicoIE
9 from ..compat import compat_urllib_request
10
11
12 class NiconicoDmcFD(FileDownloader):
13 """ Downloading niconico douga from DMC with heartbeat """
14
15 FD_NAME = 'niconico_dmc'
16
17 def real_download(self, filename, info_dict):
18 self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)
19
20 ie = NiconicoIE(self.ydl)
21 info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)
22
23 fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)
24
25 success = download_complete = False
26 timer = [None]
27 heartbeat_lock = threading.Lock()
28 heartbeat_url = heartbeat_info_dict['url']
29 heartbeat_data = heartbeat_info_dict['data'].encode()
30 heartbeat_interval = heartbeat_info_dict.get('interval', 30)
31
32 def heartbeat():
33 try:
34 compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)
35 except Exception:
36 self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
37
38 with heartbeat_lock:
39 if not download_complete:
40 timer[0] = threading.Timer(heartbeat_interval, heartbeat)
41 timer[0].start()
42
43 heartbeat_info_dict['ping']()
44 self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))
45 try:
46 heartbeat()
47 if type(fd).__name__ == 'HlsFD':
48 info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])
49 success = fd.real_download(filename, info_dict)
50 finally:
51 if heartbeat_lock:
52 with heartbeat_lock:
53 timer[0].cancel()
54 download_complete = True
55 return success
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt_dlp/downloader/niconico.py b/yt_dlp/downloader/niconico.py
--- a/yt_dlp/downloader/niconico.py
+++ b/yt_dlp/downloader/niconico.py
@@ -6,7 +6,7 @@
from .common import FileDownloader
from ..downloader import get_suitable_downloader
from ..extractor.niconico import NiconicoIE
-from ..compat import compat_urllib_request
+from ..utils import sanitized_Request
class NiconicoDmcFD(FileDownloader):
@@ -29,9 +29,11 @@
heartbeat_data = heartbeat_info_dict['data'].encode()
heartbeat_interval = heartbeat_info_dict.get('interval', 30)
+ request = sanitized_Request(heartbeat_url, heartbeat_data)
+
def heartbeat():
try:
- compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)
+ self.ydl.urlopen(request).read()
except Exception:
self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
| {"golden_diff": "diff --git a/yt_dlp/downloader/niconico.py b/yt_dlp/downloader/niconico.py\n--- a/yt_dlp/downloader/niconico.py\n+++ b/yt_dlp/downloader/niconico.py\n@@ -6,7 +6,7 @@\n from .common import FileDownloader\n from ..downloader import get_suitable_downloader\n from ..extractor.niconico import NiconicoIE\n-from ..compat import compat_urllib_request\n+from ..utils import sanitized_Request\n \n \n class NiconicoDmcFD(FileDownloader):\n@@ -29,9 +29,11 @@\n heartbeat_data = heartbeat_info_dict['data'].encode()\n heartbeat_interval = heartbeat_info_dict.get('interval', 30)\n \n+ request = sanitized_Request(heartbeat_url, heartbeat_data)\n+\n def heartbeat():\n try:\n- compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)\n+ self.ydl.urlopen(request).read()\n except Exception:\n self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)\n", "issue": "[niconico_dmc] Heartbeat failed with SOCKS5 `HTTPS?_PROXY` env\n\r\n## Checklist\r\n\r\n- [x] I'm reporting a broken site support\r\n- [x] I've verified that I'm running yt-dlp version **2021.09.02**\r\n- [x] I've checked that all provided URLs are alive and playable in a browser\r\n- [x] I've checked that all URLs and arguments with special characters are properly quoted or escaped\r\n- [x] I've searched the bugtracker for similar issues including closed ones\r\n\r\n\r\n## Verbose log\r\n\r\n```\r\n$ HTTP_PROXY=socks5://127.0.0.1:10080 HTTPS_PROXY=socks5://127.0.0.1:10080 yt-dlp --verbose --proxy socks5://127.0.0.1:10080 https://www.nicovideo.jp/watch/sm9\r\n[debug] Command-line config: ['--verbose', '--proxy', 'socks5://127.0.0.1:10080', 'https://www.nicovideo.jp/watch/sm9']\r\n[debug] Encodings: locale UTF-8, fs utf-8, out utf-8, pref UTF-8\r\n[debug] yt-dlp version 2021.09.02 (source)\r\n[debug] Python version 3.10.0rc2 (CPython 32bit) - Linux-3.10.49-armv7l-with-libc\r\n[debug] exe versions: ffmpeg 4.4, ffprobe 4.4\r\n[debug] Optional libraries: mutagen, pycryptodome, sqlite, websockets\r\n[debug] Proxy map: {'http': 'socks5://127.0.0.1:10080', 'https': 'socks5://127.0.0.1:10080'}\r\n[debug] [niconico] Extracting URL: https://www.nicovideo.jp/watch/sm9\r\n[niconico] sm9: Downloading webpage\r\n[niconico] sm9: Downloading video info page\r\n[debug] Formats sorted by: hasvid, ie_pref, lang, quality, res, fps, vcodec:vp9.2(10), acodec, filesize, fs_approx, tbr, vbr, abr, asr, proto, vext, aext, hasaud, source, id\r\n[debug] Default format spec: bestvideo*+bestaudio/best\r\n[info] sm9: Downloading 1 format(s): h264_360p-aac_128kbps\r\n[debug] Invoking downloader on \"niconico_dmc:sm9/archive_h264_360p/archive_aac_128kbps\"\r\n[niconico_dmc] Downloading from DMC\r\n[niconico] sm9: Downloading JSON metadata for h264_360p-aac_128kbps\r\n[niconico] sm9: Acquiring permission for downloading video\r\n[niconico_dmc] Heartbeat with 40 second interval ...\r\n[niconico_dmc] Heartbeat failed\r\n[download] Destination: \u65b0\u30fb\u8c6a\u8840\u5bfa\u4e00\u65cf -\u7169\u60a9\u89e3\u653e - \u30ec\u30c3\u30c4\u30b4\u30fc\uff01\u9670\u967d\u5e2b [sm9].mp4\r\n[download] 25.4% of 26.95MiB at 174.88KiB/s ETA 01:57[niconico_dmc] Heartbeat failed\r\n[download] 39.8% of 26.95MiB at 137.12KiB/s ETA 02:01[niconico_dmc] Heartbeat failed\r\n[download] 53.6% of 26.95MiB at 124.49KiB/s ETA 01:42[download] Got server HTTP error: Downloaded 15136650 bytes, expected 28261021 bytes. 
Retrying (attempt 1 of 10) ...\r\n```\r\n\r\n\r\n## Description\r\n\r\nThere seem to be two separate issues:\r\n - niconico_dmc heartbeat connection does not respect the `--proxy` option, and\r\n - it cannot connect via a SOCKS5 server.\r\n\n", "before_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport threading\n\nfrom .common import FileDownloader\nfrom ..downloader import get_suitable_downloader\nfrom ..extractor.niconico import NiconicoIE\nfrom ..compat import compat_urllib_request\n\n\nclass NiconicoDmcFD(FileDownloader):\n \"\"\" Downloading niconico douga from DMC with heartbeat \"\"\"\n\n FD_NAME = 'niconico_dmc'\n\n def real_download(self, filename, info_dict):\n self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)\n\n ie = NiconicoIE(self.ydl)\n info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)\n\n fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)\n\n success = download_complete = False\n timer = [None]\n heartbeat_lock = threading.Lock()\n heartbeat_url = heartbeat_info_dict['url']\n heartbeat_data = heartbeat_info_dict['data'].encode()\n heartbeat_interval = heartbeat_info_dict.get('interval', 30)\n\n def heartbeat():\n try:\n compat_urllib_request.urlopen(url=heartbeat_url, data=heartbeat_data)\n except Exception:\n self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)\n\n with heartbeat_lock:\n if not download_complete:\n timer[0] = threading.Timer(heartbeat_interval, heartbeat)\n timer[0].start()\n\n heartbeat_info_dict['ping']()\n self.to_screen('[%s] Heartbeat with %d second interval ...' % (self.FD_NAME, heartbeat_interval))\n try:\n heartbeat()\n if type(fd).__name__ == 'HlsFD':\n info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])\n success = fd.real_download(filename, info_dict)\n finally:\n if heartbeat_lock:\n with heartbeat_lock:\n timer[0].cancel()\n download_complete = True\n return success\n", "path": "yt_dlp/downloader/niconico.py"}], "after_files": [{"content": "# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport threading\n\nfrom .common import FileDownloader\nfrom ..downloader import get_suitable_downloader\nfrom ..extractor.niconico import NiconicoIE\nfrom ..utils import sanitized_Request\n\n\nclass NiconicoDmcFD(FileDownloader):\n \"\"\" Downloading niconico douga from DMC with heartbeat \"\"\"\n\n FD_NAME = 'niconico_dmc'\n\n def real_download(self, filename, info_dict):\n self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)\n\n ie = NiconicoIE(self.ydl)\n info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)\n\n fd = get_suitable_downloader(info_dict, params=self.params)(self.ydl, self.params)\n\n success = download_complete = False\n timer = [None]\n heartbeat_lock = threading.Lock()\n heartbeat_url = heartbeat_info_dict['url']\n heartbeat_data = heartbeat_info_dict['data'].encode()\n heartbeat_interval = heartbeat_info_dict.get('interval', 30)\n\n request = sanitized_Request(heartbeat_url, heartbeat_data)\n\n def heartbeat():\n try:\n self.ydl.urlopen(request).read()\n except Exception:\n self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)\n\n with heartbeat_lock:\n if not download_complete:\n timer[0] = threading.Timer(heartbeat_interval, heartbeat)\n timer[0].start()\n\n heartbeat_info_dict['ping']()\n self.to_screen('[%s] Heartbeat with %d second interval ...' 
% (self.FD_NAME, heartbeat_interval))\n try:\n heartbeat()\n if type(fd).__name__ == 'HlsFD':\n info_dict.update(ie._extract_m3u8_formats(info_dict['url'], info_dict['id'])[0])\n success = fd.real_download(filename, info_dict)\n finally:\n if heartbeat_lock:\n with heartbeat_lock:\n timer[0].cancel()\n download_complete = True\n return success\n", "path": "yt_dlp/downloader/niconico.py"}]} | 1,783 | 232 |
gh_patches_debug_20623 | rasdani/github-patches | git_diff | svthalia__concrexit-3068 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expand/improve document part of event endpoint
<!--
Please add the appropriate label for what change should be made:
docs: changes to the documentation
refactor: refactoring production code, e.g. renaming a variable or rewriting a function
test: adding missing tests, refactoring tests; no production code change
chore: updating poetry etc; no production code change
-->
### Describe the change
Change the current URL or add a URL to a link to the actual document (that is, one that ends with `/[name].pdf`).
### Motivation
It's very nice for the app to be able to just fetch files directly.
Also, I don't think the current URL is even necessary because you also have the PK with which you can create the URL.
### Current implementation
There is a URL that leads to the file, but only after a redirect to `cdn`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/documents/api/v2/serializers/document.py`
Content:
```
1 from rest_framework.fields import SerializerMethodField
2 from rest_framework.reverse import reverse
3
4 from documents.models import Document
5 from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
6 CleanedModelSerializer,
7 )
8
9
10 class DocumentSerializer(CleanedModelSerializer):
11 class Meta:
12 model = Document
13 fields = ("pk", "name", "url", "category", "members_only")
14
15 url = SerializerMethodField("_url")
16
17 def _url(self, instance):
18 return self.context["request"].build_absolute_uri(
19 reverse("documents:document", kwargs={"pk": instance.pk})
20 )
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py
--- a/website/documents/api/v2/serializers/document.py
+++ b/website/documents/api/v2/serializers/document.py
@@ -1,10 +1,10 @@
from rest_framework.fields import SerializerMethodField
-from rest_framework.reverse import reverse
from documents.models import Document
from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (
CleanedModelSerializer,
)
+from utils.media.services import get_media_url
class DocumentSerializer(CleanedModelSerializer):
@@ -15,6 +15,10 @@
url = SerializerMethodField("_url")
def _url(self, instance):
- return self.context["request"].build_absolute_uri(
- reverse("documents:document", kwargs={"pk": instance.pk})
- )
+ if instance.members_only and (
+ not self.request.user.is_authenticated
+ or not self.request.member.has_active_membership()
+ ):
+ return self.request.build_absolute_uri(instance.get_absolute_url())
+
+ return get_media_url(instance.file, absolute_url=True)
| {"golden_diff": "diff --git a/website/documents/api/v2/serializers/document.py b/website/documents/api/v2/serializers/document.py\n--- a/website/documents/api/v2/serializers/document.py\n+++ b/website/documents/api/v2/serializers/document.py\n@@ -1,10 +1,10 @@\n from rest_framework.fields import SerializerMethodField\n-from rest_framework.reverse import reverse\n \n from documents.models import Document\n from thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n )\n+from utils.media.services import get_media_url\n \n \n class DocumentSerializer(CleanedModelSerializer):\n@@ -15,6 +15,10 @@\n url = SerializerMethodField(\"_url\")\n \n def _url(self, instance):\n- return self.context[\"request\"].build_absolute_uri(\n- reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n- )\n+ if instance.members_only and (\n+ not self.request.user.is_authenticated\n+ or not self.request.member.has_active_membership()\n+ ):\n+ return self.request.build_absolute_uri(instance.get_absolute_url())\n+\n+ return get_media_url(instance.file, absolute_url=True)\n", "issue": "Expand/improve document part of event endpoint\n<!--\r\n\r\nPlease add the appropriate label for what change should be made:\r\ndocs: changes to the documentation)\r\nrefactor: refactoring production code, eg. renaming a variable or rewriting a function\r\ntest: adding missing tests, refactoring tests; no production code change\r\nchore: updating poetry etc; no production code change\r\n\r\n-->\r\n\r\n### Describe the change\r\nChange the current URL or add a URL to a link to the actual document (that is, one that ends with `/[name].pdf`).\r\n\r\n### Motivation\r\nIt's very nice for the app to be able to just fetch files directly.\r\nAlso, I don't think the current URL is even necessary because you also have the PK with which you can create the URL.\r\n\r\n### Current implementation\r\nThere is a URL that leads to the file, but only after a redirect to `cdn`.\n", "before_files": [{"content": "from rest_framework.fields import SerializerMethodField\nfrom rest_framework.reverse import reverse\n\nfrom documents.models import Document\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\n\n\nclass DocumentSerializer(CleanedModelSerializer):\n class Meta:\n model = Document\n fields = (\"pk\", \"name\", \"url\", \"category\", \"members_only\")\n\n url = SerializerMethodField(\"_url\")\n\n def _url(self, instance):\n return self.context[\"request\"].build_absolute_uri(\n reverse(\"documents:document\", kwargs={\"pk\": instance.pk})\n )\n", "path": "website/documents/api/v2/serializers/document.py"}], "after_files": [{"content": "from rest_framework.fields import SerializerMethodField\n\nfrom documents.models import Document\nfrom thaliawebsite.api.v2.serializers.cleaned_model_serializer import (\n CleanedModelSerializer,\n)\nfrom utils.media.services import get_media_url\n\n\nclass DocumentSerializer(CleanedModelSerializer):\n class Meta:\n model = Document\n fields = (\"pk\", \"name\", \"url\", \"category\", \"members_only\")\n\n url = SerializerMethodField(\"_url\")\n\n def _url(self, instance):\n if instance.members_only and (\n not self.request.user.is_authenticated\n or not self.request.member.has_active_membership()\n ):\n return self.request.build_absolute_uri(instance.get_absolute_url())\n\n return get_media_url(instance.file, absolute_url=True)\n", "path": "website/documents/api/v2/serializers/document.py"}]} | 608 | 251 |
gh_patches_debug_51282 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-636 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add a standard way to "reset" a Configuration object for testing
It is a common occurrence in tests that the global `Configuration` object needs to be "reset" between tests. This means that its attributes need to be set back to their original values. Since the `Configuration` object is immutable by design, some additional, non-production available mechanism is needed to perform this action.
The need for this feature was mentioned in a [conversation](https://github.com/open-telemetry/opentelemetry-python/pull/630#discussion_r418343720) in #630.
--- END ISSUE ---
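A sketch of how such a reset hook would typically be used in a test suite (assuming a classmethod-style `Configuration._reset()` like the one added in the patch below; the fixture is invented for illustration):
```
import pytest

from opentelemetry.configuration import Configuration


@pytest.fixture(autouse=True)
def reset_configuration():
    """Give every test a fresh Configuration singleton."""
    yield
    # Drop the cached instance and the attributes that were read from
    # OPENTELEMETRY_PYTHON_* environment variables during the test.
    Configuration._reset()
```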
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-api/src/opentelemetry/configuration/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # FIXME find a better way to avoid all those "Expression has type "Any"" errors
16 # type: ignore
17
18 """
19 Simple configuration manager
20
21 This is a configuration manager for OpenTelemetry. It reads configuration
22 values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose
23 characters are only alphanumeric characters and unserscores, except for the
24 first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.
25
26 For example, these environment variables will be read:
27
28 1. ``OPENTELEMETRY_PYTHON_SOMETHING``
29 2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
30 3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
31 4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``
32 4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``
33
34 These won't:
35
36 1. ``OPENTELEMETRY_PYTH_SOMETHING``
37 2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``
38 3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
39
40 The values stored in the environment variables can be found in an instance of
41 ``opentelemetry.configuration.Configuration``. This class can be instantiated
42 freely because instantiating it returns always the same object.
43
44 For example, if the environment variable
45 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
46 ``Configuration().meter_provider == "my_meter_provider"`` would be ``True``.
47
48 Non defined attributes will always return ``None``. This is intended to make it
49 easier to use the ``Configuration`` object in actual code, because it won't be
50 necessary to check for the attribute to be defined first.
51
52 Environment variables used by OpenTelemetry
53 -------------------------------------------
54
55 1. OPENTELEMETRY_PYTHON_METER_PROVIDER
56 2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER
57
58 The value of these environment variables should be the name of the entry point
59 that points to the class that implements either provider. This OpenTelemetry
60 API package provides one entry point for each, which can be found in the
61 setup.py file::
62
63 entry_points={
64 ...
65 "opentelemetry_meter_provider": [
66 "default_meter_provider = "
67 "opentelemetry.metrics:DefaultMeterProvider"
68 ],
69 "opentelemetry_tracer_provider": [
70 "default_tracer_provider = "
71 "opentelemetry.trace:DefaultTracerProvider"
72 ],
73 }
74
75 To use the meter provider above, then the
76 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to
77 "default_meter_provider" (this is not actually necessary since the
78 OpenTelemetry API provided providers are the default ones used if no
79 configuration is found in the environment variables).
80 """
81
82 from os import environ
83 from re import fullmatch
84
85
86 class Configuration:
87 _instance = None
88
89 __slots__ = []
90
91 def __new__(cls) -> "Configuration":
92 if Configuration._instance is None:
93
94 for key, value in environ.items():
95
96 match = fullmatch(
97 r"OPENTELEMETRY_PYTHON_([A-Za-z_][\w_]*)", key
98 )
99
100 if match is not None:
101
102 key = match.group(1)
103
104 setattr(Configuration, "_{}".format(key), value)
105 setattr(
106 Configuration,
107 key,
108 property(
109 fget=lambda cls, key=key: getattr(
110 cls, "_{}".format(key)
111 )
112 ),
113 )
114
115 Configuration.__slots__.append(key)
116
117 Configuration.__slots__ = tuple(Configuration.__slots__)
118
119 Configuration._instance = object.__new__(cls)
120
121 return cls._instance
122
123 def __getattr__(self, name):
124 return None
125
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
@@ -122,3 +122,20 @@
def __getattr__(self, name):
return None
+
+ @classmethod
+ def _reset(cls):
+ """
+ This method "resets" the global configuration attributes
+
+ It is not intended to be used by production code but by testing code
+ only.
+ """
+
+ for slot in cls.__slots__:
+ if slot in cls.__dict__.keys():
+ delattr(cls, slot)
+ delattr(cls, "_{}".format(slot))
+
+ cls.__slots__ = []
+ cls._instance = None
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n@@ -122,3 +122,20 @@\n \n def __getattr__(self, name):\n return None\n+\n+ @classmethod\n+ def _reset(cls):\n+ \"\"\"\n+ This method \"resets\" the global configuration attributes\n+\n+ It is not intended to be used by production code but by testing code\n+ only.\n+ \"\"\"\n+\n+ for slot in cls.__slots__:\n+ if slot in cls.__dict__.keys():\n+ delattr(cls, slot)\n+ delattr(cls, \"_{}\".format(slot))\n+\n+ cls.__slots__ = []\n+ cls._instance = None\n", "issue": "Add a standard way to \"reset\" a Configuration object for testing\nIt is a common occurrence in tests that the global `Configuration` object needs to be \"reset\" between tests. This means that its attributes need to be set back to their original values. Since the `Configuration` object is immutable by design, some additional, non-production available mechanism is needed to perform this action.\r\n\r\nThe need for this feature was mentioned in a [conversation](https://github.com/open-telemetry/opentelemetry-python/pull/630#discussion_r418343720) in #630.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# FIXME find a better way to avoid all those \"Expression has type \"Any\"\" errors\n# type: ignore\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\ncharacters are only alphanumeric characters and unserscores, except for the\nfirst character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. 
This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n\"default_meter_provider\" (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\n\n\nclass Configuration:\n _instance = None\n\n __slots__ = []\n\n def __new__(cls) -> \"Configuration\":\n if Configuration._instance is None:\n\n for key, value in environ.items():\n\n match = fullmatch(\n r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n )\n\n if match is not None:\n\n key = match.group(1)\n\n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n Configuration,\n key,\n property(\n fget=lambda cls, key=key: getattr(\n cls, \"_{}\".format(key)\n )\n ),\n )\n\n Configuration.__slots__.append(key)\n\n Configuration.__slots__ = tuple(Configuration.__slots__)\n\n Configuration._instance = object.__new__(cls)\n\n return cls._instance\n\n def __getattr__(self, name):\n return None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# FIXME find a better way to avoid all those \"Expression has type \"Any\"\" errors\n# type: ignore\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\ncharacters are only alphanumeric characters and unserscores, except for the\nfirst character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n3. 
``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n\"default_meter_provider\" (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\n\n\nclass Configuration:\n _instance = None\n\n __slots__ = []\n\n def __new__(cls) -> \"Configuration\":\n if Configuration._instance is None:\n\n for key, value in environ.items():\n\n match = fullmatch(\n r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n )\n\n if match is not None:\n\n key = match.group(1)\n\n setattr(Configuration, \"_{}\".format(key), value)\n setattr(\n Configuration,\n key,\n property(\n fget=lambda cls, key=key: getattr(\n cls, \"_{}\".format(key)\n )\n ),\n )\n\n Configuration.__slots__.append(key)\n\n Configuration.__slots__ = tuple(Configuration.__slots__)\n\n Configuration._instance = object.__new__(cls)\n\n return cls._instance\n\n def __getattr__(self, name):\n return None\n\n @classmethod\n def _reset(cls):\n \"\"\"\n This method \"resets\" the global configuration attributes\n\n It is not intended to be used by production code but by testing code\n only.\n \"\"\"\n\n for slot in cls.__slots__:\n if slot in cls.__dict__.keys():\n delattr(cls, slot)\n delattr(cls, \"_{}\".format(slot))\n\n cls.__slots__ = []\n cls._instance = None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}]} | 1,632 | 205 |
gh_patches_debug_3505 | rasdani/github-patches | git_diff | saleor__saleor-1832 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Localization of duplicate email account doesn't make sense
I saw this weird thing in post production when I was creating a customer account that was already registered.


---
Edit: note, the untranslated string is ` User with this Email already exists. `
---
I'm not quite sure how to explain this: the message makes sense in English but not in other languages. A French or Polish user can still understand it, but it does not read naturally; to a developer it looks like a debug string.
I will wait to hear back from you. I also know the string does not come from your side, so I'm not sure what you can do about this.
The error is generated by `django.db.models.base.Model#unique_error_message` (sourcing from `django.forms.models.BaseModelForm#_post_clean`).
```python
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': capfirst(opts.verbose_name),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = capfirst(field.verbose_name)
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = get_text_list(field_labels, _('and'))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
# ^^^^^^
```
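For context, here is a minimal sketch (editor's illustration; it assumes a configured Django project and an existing user with that email address) of how this untranslated message surfaces through the signup form:

```python
# Hypothetical reproduction: ModelForm._post_clean() -> validate_unique()
# raises the 'unique' error taken from the model field, bypassing the
# form's translatable strings.
from saleor.account.forms import SignupForm

form = SignupForm(data={"email": "existing@example.com", "password": "secret"})
assert not form.is_valid()
print(form.errors["email"])  # ['User with this Email already exists.']
```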
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/account/forms.py`
Content:
```
1 from django import forms
2 from django.conf import settings
3 from django.contrib.auth import forms as django_forms, update_session_auth_hash
4 from django.utils.translation import pgettext, pgettext_lazy
5 from phonenumbers.phonenumberutil import country_code_for_region
6
7 from ..account.models import User
8 from .i18n import AddressMetaForm, get_address_form_class
9 from . import emails
10
11
12 def get_address_form(
13 data, country_code, initial=None, instance=None, **kwargs):
14 country_form = AddressMetaForm(data, initial=initial)
15 preview = False
16 if country_form.is_valid():
17 country_code = country_form.cleaned_data['country']
18 preview = country_form.cleaned_data['preview']
19
20 if initial is None and country_code:
21 initial = {}
22 if country_code:
23 initial['phone'] = '+{}'.format(country_code_for_region(country_code))
24
25 address_form_class = get_address_form_class(country_code)
26
27 if not preview and instance is not None:
28 address_form_class = get_address_form_class(
29 instance.country.code)
30 address_form = address_form_class(
31 data, instance=instance, **kwargs)
32 else:
33 initial_address = (
34 initial if not preview
35 else data.dict() if data is not None else data)
36 address_form = address_form_class(
37 not preview and data or None,
38 initial=initial_address,
39 **kwargs)
40 return address_form, preview
41
42
43 class ChangePasswordForm(django_forms.PasswordChangeForm):
44 def __init__(self, *args, **kwargs):
45 super().__init__(*args, **kwargs)
46 self.fields['new_password1'].user = self.user
47 self.fields['old_password'].widget.attrs['placeholder'] = ''
48 self.fields['new_password1'].widget.attrs['placeholder'] = ''
49 del self.fields['new_password2']
50
51
52 def logout_on_password_change(request, user):
53 if (update_session_auth_hash is not None and
54 not settings.LOGOUT_ON_PASSWORD_CHANGE):
55 update_session_auth_hash(request, user)
56
57
58 class LoginForm(django_forms.AuthenticationForm):
59 username = forms.EmailField(
60 label=pgettext('Form field', 'Email'), max_length=75)
61
62 def __init__(self, request=None, *args, **kwargs):
63 super().__init__(request=request, *args, **kwargs)
64 if request:
65 email = request.GET.get('email')
66 if email:
67 self.fields['username'].initial = email
68
69
70 class SignupForm(forms.ModelForm):
71 password = forms.CharField(
72 widget=forms.PasswordInput)
73
74 class Meta:
75 model = User
76 fields = ('email',)
77 labels = {
78 'email': pgettext_lazy(
79 'Email', 'Email'),
80 'password': pgettext_lazy(
81 'Password', 'Password')}
82
83 def __init__(self, *args, **kwargs):
84 super().__init__(*args, **kwargs)
85 if self._meta.model.USERNAME_FIELD in self.fields:
86 self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(
87 {'autofocus': ''})
88
89 def save(self, request=None, commit=True):
90 user = super().save(commit=False)
91 password = self.cleaned_data['password']
92 user.set_password(password)
93 if commit:
94 user.save()
95 return user
96
97
98 class PasswordResetForm(django_forms.PasswordResetForm):
99 """Allow resetting passwords.
100
101 This subclass overrides sending emails to use templated email.
102 """
103
104 def get_users(self, email):
105 active_users = User.objects.filter(email__iexact=email, is_active=True)
106 return active_users
107
108 def send_mail(
109 self, subject_template_name, email_template_name, context,
110 from_email, to_email, html_email_template_name=None):
111 emails.send_password_reset_email.delay(context, to_email)
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/account/forms.py b/saleor/account/forms.py
--- a/saleor/account/forms.py
+++ b/saleor/account/forms.py
@@ -70,6 +70,11 @@
class SignupForm(forms.ModelForm):
password = forms.CharField(
widget=forms.PasswordInput)
+ email = forms.EmailField(
+ error_messages={
+ 'unique': pgettext_lazy(
+ 'Registration error',
+ 'This email has already been registered.')})
class Meta:
model = User
| {"golden_diff": "diff --git a/saleor/account/forms.py b/saleor/account/forms.py\n--- a/saleor/account/forms.py\n+++ b/saleor/account/forms.py\n@@ -70,6 +70,11 @@\n class SignupForm(forms.ModelForm):\n password = forms.CharField(\n widget=forms.PasswordInput)\n+ email = forms.EmailField(\n+ error_messages={\n+ 'unique': pgettext_lazy(\n+ 'Registration error',\n+ 'This email has already been registered.')})\n \n class Meta:\n model = User\n", "issue": "Localization of duplicate email account doesn't make sense\nI saw this weird thing in post production when I was creating a customer account that was already registered.\r\n\r\n\r\n\r\n\r\n\r\n---\r\n\r\nEdit: note, the untranslated string is ` User with this Email already exists. `\r\n\r\n---\r\n\r\nI'm not quite sure how to explain this, it doesn't make sense in other language, but make sense in English. It's understandable for a French or a Polish for example but doesn't make real sense it shouldn't be that way, it looks like a debug string (as a dev).\r\n\r\nI will wait to hear back from you. I also know it's not from your side, so I'm not quite sure of what you can do about this.\r\n\r\nThe error is generated by `django.db.models.base.Model#unique_error_message` (sourcing from `django.forms.models.BaseModelForm#_post_clean`).\r\n```python\r\n def unique_error_message(self, model_class, unique_check):\r\n opts = model_class._meta\r\n\r\n params = {\r\n 'model': self,\r\n 'model_class': model_class,\r\n 'model_name': capfirst(opts.verbose_name),\r\n 'unique_check': unique_check,\r\n }\r\n\r\n # A unique field\r\n if len(unique_check) == 1:\r\n field = opts.get_field(unique_check[0])\r\n params['field_label'] = capfirst(field.verbose_name)\r\n return ValidationError(\r\n message=field.error_messages['unique'],\r\n code='unique',\r\n params=params,\r\n )\r\n\r\n # unique_together\r\n else:\r\n field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]\r\n params['field_labels'] = get_text_list(field_labels, _('and'))\r\n return ValidationError(\r\n message=_(\"%(model_name)s with this %(field_labels)s already exists.\"),\r\n code='unique_together',\r\n params=params,\r\n )\r\n # ^^^^^^\r\n```\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import forms as django_forms, update_session_auth_hash\nfrom django.utils.translation import pgettext, pgettext_lazy\nfrom phonenumbers.phonenumberutil import country_code_for_region\n\nfrom ..account.models import User\nfrom .i18n import AddressMetaForm, get_address_form_class\nfrom . 
import emails\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, **kwargs):\n country_form = AddressMetaForm(data, initial=initial)\n preview = False\n if country_form.is_valid():\n country_code = country_form.cleaned_data['country']\n preview = country_form.cleaned_data['preview']\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial['phone'] = '+{}'.format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if not preview and instance is not None:\n address_form_class = get_address_form_class(\n instance.country.code)\n address_form = address_form_class(\n data, instance=instance, **kwargs)\n else:\n initial_address = (\n initial if not preview\n else data.dict() if data is not None else data)\n address_form = address_form_class(\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n return address_form, preview\n\n\nclass ChangePasswordForm(django_forms.PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].user = self.user\n self.fields['old_password'].widget.attrs['placeholder'] = ''\n self.fields['new_password1'].widget.attrs['placeholder'] = ''\n del self.fields['new_password2']\n\n\ndef logout_on_password_change(request, user):\n if (update_session_auth_hash is not None and\n not settings.LOGOUT_ON_PASSWORD_CHANGE):\n update_session_auth_hash(request, user)\n\n\nclass LoginForm(django_forms.AuthenticationForm):\n username = forms.EmailField(\n label=pgettext('Form field', 'Email'), max_length=75)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n if request:\n email = request.GET.get('email')\n if email:\n self.fields['username'].initial = email\n\n\nclass SignupForm(forms.ModelForm):\n password = forms.CharField(\n widget=forms.PasswordInput)\n\n class Meta:\n model = User\n fields = ('email',)\n labels = {\n 'email': pgettext_lazy(\n 'Email', 'Email'),\n 'password': pgettext_lazy(\n 'Password', 'Password')}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(\n {'autofocus': ''})\n\n def save(self, request=None, commit=True):\n user = super().save(commit=False)\n password = self.cleaned_data['password']\n user.set_password(password)\n if commit:\n user.save()\n return user\n\n\nclass PasswordResetForm(django_forms.PasswordResetForm):\n \"\"\"Allow resetting passwords.\n\n This subclass overrides sending emails to use templated email.\n \"\"\"\n\n def get_users(self, email):\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n return active_users\n\n def send_mail(\n self, subject_template_name, email_template_name, context,\n from_email, to_email, html_email_template_name=None):\n emails.send_password_reset_email.delay(context, to_email)\n", "path": "saleor/account/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.contrib.auth import forms as django_forms, update_session_auth_hash\nfrom django.utils.translation import pgettext, pgettext_lazy\nfrom phonenumbers.phonenumberutil import country_code_for_region\n\nfrom ..account.models import User\nfrom .i18n import AddressMetaForm, get_address_form_class\nfrom . 
import emails\n\n\ndef get_address_form(\n data, country_code, initial=None, instance=None, **kwargs):\n country_form = AddressMetaForm(data, initial=initial)\n preview = False\n if country_form.is_valid():\n country_code = country_form.cleaned_data['country']\n preview = country_form.cleaned_data['preview']\n\n if initial is None and country_code:\n initial = {}\n if country_code:\n initial['phone'] = '+{}'.format(country_code_for_region(country_code))\n\n address_form_class = get_address_form_class(country_code)\n\n if not preview and instance is not None:\n address_form_class = get_address_form_class(\n instance.country.code)\n address_form = address_form_class(\n data, instance=instance, **kwargs)\n else:\n initial_address = (\n initial if not preview\n else data.dict() if data is not None else data)\n address_form = address_form_class(\n not preview and data or None,\n initial=initial_address,\n **kwargs)\n return address_form, preview\n\n\nclass ChangePasswordForm(django_forms.PasswordChangeForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['new_password1'].user = self.user\n self.fields['old_password'].widget.attrs['placeholder'] = ''\n self.fields['new_password1'].widget.attrs['placeholder'] = ''\n del self.fields['new_password2']\n\n\ndef logout_on_password_change(request, user):\n if (update_session_auth_hash is not None and\n not settings.LOGOUT_ON_PASSWORD_CHANGE):\n update_session_auth_hash(request, user)\n\n\nclass LoginForm(django_forms.AuthenticationForm):\n username = forms.EmailField(\n label=pgettext('Form field', 'Email'), max_length=75)\n\n def __init__(self, request=None, *args, **kwargs):\n super().__init__(request=request, *args, **kwargs)\n if request:\n email = request.GET.get('email')\n if email:\n self.fields['username'].initial = email\n\n\nclass SignupForm(forms.ModelForm):\n password = forms.CharField(\n widget=forms.PasswordInput)\n email = forms.EmailField(\n error_messages={\n 'unique': pgettext_lazy(\n 'Registration error',\n 'This email has already been registered.')})\n\n class Meta:\n model = User\n fields = ('email',)\n labels = {\n 'email': pgettext_lazy(\n 'Email', 'Email'),\n 'password': pgettext_lazy(\n 'Password', 'Password')}\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if self._meta.model.USERNAME_FIELD in self.fields:\n self.fields[self._meta.model.USERNAME_FIELD].widget.attrs.update(\n {'autofocus': ''})\n\n def save(self, request=None, commit=True):\n user = super().save(commit=False)\n password = self.cleaned_data['password']\n user.set_password(password)\n if commit:\n user.save()\n return user\n\n\nclass PasswordResetForm(django_forms.PasswordResetForm):\n \"\"\"Allow resetting passwords.\n\n This subclass overrides sending emails to use templated email.\n \"\"\"\n\n def get_users(self, email):\n active_users = User.objects.filter(email__iexact=email, is_active=True)\n return active_users\n\n def send_mail(\n self, subject_template_name, email_template_name, context,\n from_email, to_email, html_email_template_name=None):\n emails.send_password_reset_email.delay(context, to_email)\n", "path": "saleor/account/forms.py"}]} | 1,717 | 116 |
gh_patches_debug_10163 | rasdani/github-patches | git_diff | pytorch__pytorch-2200 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
DataParallel tests are currently broken
https://github.com/pytorch/pytorch/pull/2121/commits/d69669efcfe4333c223f53249185c2e22f76ed73 has broken DataParallel tests. Now that device_ids are explicitly sent to parallel_apply, this assert https://github.com/pytorch/pytorch/blob/master/torch/nn/parallel/parallel_apply.py#L30 gets triggered if inputs are not big enough to be on all devices (e.g. batch size of 20 on 8 GPUs gets chunked into 6*3+2, so that 8-th GPU is idle, and assert gets triggered).
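A minimal sketch of the arithmetic behind that failure (editor's illustration; the numbers mirror the example above and the final checks mirror the length assertion in parallel_apply.py):

```python
# Batch of 20 scattered over 8 GPUs: chunks of ceil(20 / 8) = 3 give only
# 7 chunks (6 * 3 + 2), so the 8th device receives no input.
import math

batch_size, num_gpus = 20, 8
chunk_size = math.ceil(batch_size / num_gpus)        # 3
num_chunks = math.ceil(batch_size / chunk_size)      # 7

device_ids = list(range(num_gpus))
inputs = [object()] * num_chunks                     # stand-ins for scattered chunks
replicas = [object()] * len(inputs)                  # replicate(module, device_ids[:len(inputs)])

assert len(replicas) != len(device_ids)              # 7 vs 8: the assert in parallel_apply fires
assert len(replicas) == len(device_ids[:len(replicas)])  # passing the sliced ids avoids it
```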
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch/nn/parallel/data_parallel.py`
Content:
```
1 import torch
2 from ..modules import Module
3 from .scatter_gather import scatter_kwargs, gather
4 from .replicate import replicate
5 from .parallel_apply import parallel_apply
6
7
8 class DataParallel(Module):
9 """Implements data parallelism at the module level.
10
11 This container parallelizes the application of the given module by
12 splitting the input across the specified devices by chunking in the batch
13 dimension. In the forward pass, the module is replicated on each device,
14 and each replica handles a portion of the input. During the backwards
15 pass, gradients from each replica are summed into the original module.
16
17 The batch size should be larger than the number of GPUs used. It should
18 also be an integer multiple of the number of GPUs so that each chunk is the
19 same size (so that each GPU processes the same number of samples).
20
21 See also: :ref:`cuda-nn-dataparallel-instead`
22
23 Arbitrary positional and keyword inputs are allowed to be passed into
24 DataParallel EXCEPT Tensors. All variables will be scattered on dim
25 specified (default 0). Primitive types will be broadcasted, but all
26 other types will be a shallow copy and can be corrupted if written to in
27 the model's forward pass.
28
29 Args:
30 module: module to be parallelized
31 device_ids: CUDA devices (default: all devices)
32 output_device: device location of output (default: device_ids[0])
33
34 Example::
35
36 >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
37 >>> output = net(input_var)
38 """
39
40 # TODO: update notes/cuda.rst when this class handles 8+ GPUs well
41
42 def __init__(self, module, device_ids=None, output_device=None, dim=0):
43 super(DataParallel, self).__init__()
44 if device_ids is None:
45 device_ids = list(range(torch.cuda.device_count()))
46 if output_device is None:
47 output_device = device_ids[0]
48 self.dim = dim
49 self.module = module
50 self.device_ids = device_ids
51 self.output_device = output_device
52 if len(self.device_ids) == 1:
53 self.module.cuda(device_ids[0])
54
55 def forward(self, *inputs, **kwargs):
56 inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
57 if len(self.device_ids) == 1:
58 return self.module(*inputs[0], **kwargs[0])
59 replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
60 outputs = self.parallel_apply(replicas, inputs, kwargs)
61 return self.gather(outputs, self.output_device)
62
63 def replicate(self, module, device_ids):
64 return replicate(module, device_ids)
65
66 def scatter(self, inputs, kwargs, device_ids):
67 return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
68
69 def parallel_apply(self, replicas, inputs, kwargs):
70 return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
71
72 def gather(self, outputs, output_device):
73 return gather(outputs, output_device, dim=self.dim)
74
75
76 def data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
77 """Evaluates module(input) in parallel across the GPUs given in device_ids.
78
79 This is the functional version of the DataParallel module.
80
81 Args:
82 module: the module to evaluate in parallel
83 inputs: inputs to the module
84 device_ids: GPU ids on which to replicate module
85 output_device: GPU location of the output Use -1 to indicate the CPU.
86 (default: device_ids[0])
87 Returns:
88 a Variable containing the result of module(input) located on
89 output_device
90 """
91 if not isinstance(inputs, tuple):
92 inputs = (inputs,)
93
94 if device_ids is None:
95 device_ids = list(range(torch.cuda.device_count()))
96
97 if output_device is None:
98 output_device = device_ids[0]
99
100 inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
101 if len(device_ids) == 1:
102 return module(*inputs[0], **module_kwargs[0])
103 replicas = replicate(module, device_ids[:len(inputs)])
104 outputs = parallel_apply(replicas, inputs, module_kwargs, device_ids)
105 return gather(outputs, output_device, dim)
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch/nn/parallel/data_parallel.py b/torch/nn/parallel/data_parallel.py
--- a/torch/nn/parallel/data_parallel.py
+++ b/torch/nn/parallel/data_parallel.py
@@ -100,6 +100,7 @@
inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
if len(device_ids) == 1:
return module(*inputs[0], **module_kwargs[0])
- replicas = replicate(module, device_ids[:len(inputs)])
- outputs = parallel_apply(replicas, inputs, module_kwargs, device_ids)
+ used_device_ids = device_ids[:len(inputs)]
+ replicas = replicate(module, used_device_ids)
+ outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
return gather(outputs, output_device, dim)
| {"golden_diff": "diff --git a/torch/nn/parallel/data_parallel.py b/torch/nn/parallel/data_parallel.py\n--- a/torch/nn/parallel/data_parallel.py\n+++ b/torch/nn/parallel/data_parallel.py\n@@ -100,6 +100,7 @@\n inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)\n if len(device_ids) == 1:\n return module(*inputs[0], **module_kwargs[0])\n- replicas = replicate(module, device_ids[:len(inputs)])\n- outputs = parallel_apply(replicas, inputs, module_kwargs, device_ids)\n+ used_device_ids = device_ids[:len(inputs)]\n+ replicas = replicate(module, used_device_ids)\n+ outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)\n return gather(outputs, output_device, dim)\n", "issue": "DataParallel tests are currently broken \nhttps://github.com/pytorch/pytorch/pull/2121/commits/d69669efcfe4333c223f53249185c2e22f76ed73 has broken DataParallel tests. Now that device_ids are explicitly sent to parallel_apply, this assert https://github.com/pytorch/pytorch/blob/master/torch/nn/parallel/parallel_apply.py#L30 gets triggered if inputs are not big enough to be on all devices (e.g. batch size of 20 on 8 GPUs gets chunked into 6*3+2, so that 8-th GPU is idle, and assert gets triggered). \r\n\n", "before_files": [{"content": "import torch\nfrom ..modules import Module\nfrom .scatter_gather import scatter_kwargs, gather\nfrom .replicate import replicate\nfrom .parallel_apply import parallel_apply\n\n\nclass DataParallel(Module):\n \"\"\"Implements data parallelism at the module level.\n\n This container parallelizes the application of the given module by\n splitting the input across the specified devices by chunking in the batch\n dimension. In the forward pass, the module is replicated on each device,\n and each replica handles a portion of the input. During the backwards\n pass, gradients from each replica are summed into the original module.\n\n The batch size should be larger than the number of GPUs used. It should\n also be an integer multiple of the number of GPUs so that each chunk is the\n same size (so that each GPU processes the same number of samples).\n\n See also: :ref:`cuda-nn-dataparallel-instead`\n\n Arbitrary positional and keyword inputs are allowed to be passed into\n DataParallel EXCEPT Tensors. All variables will be scattered on dim\n specified (default 0). 
Primitive types will be broadcasted, but all\n other types will be a shallow copy and can be corrupted if written to in\n the model's forward pass.\n\n Args:\n module: module to be parallelized\n device_ids: CUDA devices (default: all devices)\n output_device: device location of output (default: device_ids[0])\n\n Example::\n\n >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])\n >>> output = net(input_var)\n \"\"\"\n\n # TODO: update notes/cuda.rst when this class handles 8+ GPUs well\n\n def __init__(self, module, device_ids=None, output_device=None, dim=0):\n super(DataParallel, self).__init__()\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n if output_device is None:\n output_device = device_ids[0]\n self.dim = dim\n self.module = module\n self.device_ids = device_ids\n self.output_device = output_device\n if len(self.device_ids) == 1:\n self.module.cuda(device_ids[0])\n\n def forward(self, *inputs, **kwargs):\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n return self.module(*inputs[0], **kwargs[0])\n replicas = self.replicate(self.module, self.device_ids[:len(inputs)])\n outputs = self.parallel_apply(replicas, inputs, kwargs)\n return self.gather(outputs, self.output_device)\n\n def replicate(self, module, device_ids):\n return replicate(module, device_ids)\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)\n\n def parallel_apply(self, replicas, inputs, kwargs):\n return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])\n\n def gather(self, outputs, output_device):\n return gather(outputs, output_device, dim=self.dim)\n\n\ndef data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):\n \"\"\"Evaluates module(input) in parallel across the GPUs given in device_ids.\n\n This is the functional version of the DataParallel module.\n\n Args:\n module: the module to evaluate in parallel\n inputs: inputs to the module\n device_ids: GPU ids on which to replicate module\n output_device: GPU location of the output Use -1 to indicate the CPU.\n (default: device_ids[0])\n Returns:\n a Variable containing the result of module(input) located on\n output_device\n \"\"\"\n if not isinstance(inputs, tuple):\n inputs = (inputs,)\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n\n if output_device is None:\n output_device = device_ids[0]\n\n inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)\n if len(device_ids) == 1:\n return module(*inputs[0], **module_kwargs[0])\n replicas = replicate(module, device_ids[:len(inputs)])\n outputs = parallel_apply(replicas, inputs, module_kwargs, device_ids)\n return gather(outputs, output_device, dim)\n", "path": "torch/nn/parallel/data_parallel.py"}], "after_files": [{"content": "import torch\nfrom ..modules import Module\nfrom .scatter_gather import scatter_kwargs, gather\nfrom .replicate import replicate\nfrom .parallel_apply import parallel_apply\n\n\nclass DataParallel(Module):\n \"\"\"Implements data parallelism at the module level.\n\n This container parallelizes the application of the given module by\n splitting the input across the specified devices by chunking in the batch\n dimension. In the forward pass, the module is replicated on each device,\n and each replica handles a portion of the input. 
During the backwards\n pass, gradients from each replica are summed into the original module.\n\n The batch size should be larger than the number of GPUs used. It should\n also be an integer multiple of the number of GPUs so that each chunk is the\n same size (so that each GPU processes the same number of samples).\n\n See also: :ref:`cuda-nn-dataparallel-instead`\n\n Arbitrary positional and keyword inputs are allowed to be passed into\n DataParallel EXCEPT Tensors. All variables will be scattered on dim\n specified (default 0). Primitive types will be broadcasted, but all\n other types will be a shallow copy and can be corrupted if written to in\n the model's forward pass.\n\n Args:\n module: module to be parallelized\n device_ids: CUDA devices (default: all devices)\n output_device: device location of output (default: device_ids[0])\n\n Example::\n\n >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])\n >>> output = net(input_var)\n \"\"\"\n\n # TODO: update notes/cuda.rst when this class handles 8+ GPUs well\n\n def __init__(self, module, device_ids=None, output_device=None, dim=0):\n super(DataParallel, self).__init__()\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n if output_device is None:\n output_device = device_ids[0]\n self.dim = dim\n self.module = module\n self.device_ids = device_ids\n self.output_device = output_device\n if len(self.device_ids) == 1:\n self.module.cuda(device_ids[0])\n\n def forward(self, *inputs, **kwargs):\n inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)\n if len(self.device_ids) == 1:\n return self.module(*inputs[0], **kwargs[0])\n replicas = self.replicate(self.module, self.device_ids[:len(inputs)])\n outputs = self.parallel_apply(replicas, inputs, kwargs)\n return self.gather(outputs, self.output_device)\n\n def replicate(self, module, device_ids):\n return replicate(module, device_ids)\n\n def scatter(self, inputs, kwargs, device_ids):\n return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)\n\n def parallel_apply(self, replicas, inputs, kwargs):\n return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])\n\n def gather(self, outputs, output_device):\n return gather(outputs, output_device, dim=self.dim)\n\n\ndef data_parallel(module, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):\n \"\"\"Evaluates module(input) in parallel across the GPUs given in device_ids.\n\n This is the functional version of the DataParallel module.\n\n Args:\n module: the module to evaluate in parallel\n inputs: inputs to the module\n device_ids: GPU ids on which to replicate module\n output_device: GPU location of the output Use -1 to indicate the CPU.\n (default: device_ids[0])\n Returns:\n a Variable containing the result of module(input) located on\n output_device\n \"\"\"\n if not isinstance(inputs, tuple):\n inputs = (inputs,)\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n\n if output_device is None:\n output_device = device_ids[0]\n\n inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)\n if len(device_ids) == 1:\n return module(*inputs[0], **module_kwargs[0])\n used_device_ids = device_ids[:len(inputs)]\n replicas = replicate(module, used_device_ids)\n outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)\n return gather(outputs, output_device, dim)\n", "path": "torch/nn/parallel/data_parallel.py"}]} | 1,575 | 185 |
gh_patches_debug_34564 | rasdani/github-patches | git_diff | pyinstaller__pyinstaller-7259 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fixes for use of pyinstaller with Django 4.x and custom management commands.
PROBLEM:
This feature aims to solve two problems: custom app-level management commands are missed from the hidden imports, and imports of apps listed in INSTALLED_APPS can fail because of erroneous execution of the 'eval_script' function. Specifically, when the hidden imports of the INSTALLED_APPS are evaluated, the logging output generated by 'collect_submodules' (called in django_import_finder.py) is captured in STDOUT regardless of the --log-level. In addition, any extra management commands provided by one of the INSTALLED_APPS are ignored because the patched 'get_commands' function has a hardcoded reference to the Django 1.8 command set. Django's current implementation of command collection does not complain about missing commands at runtime, which renders the currently implemented patch of this function irrelevant.
SOLUTION:
The solution is to remove several redundant parts of the code and to add overrides that declutter STDOUT.
The following measures were taken to resolve the problem:
- remove the monkey patching of Django's 'get_commands' method in pyi_rth_django.py
- modify the submodule collection code (collect_submodules) so that it takes a boolean 'log' parameter; wrapping the function's logging calls in a conditional on this flag prevents logs from being raised inappropriately (a sketch of this change follows below).
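A rough sketch of that second measure (editor's illustration only; the parameter name comes from the issue text and the signature is not PyInstaller's current API):

```python
# Hypothetical 'log' switch on PyInstaller's collect_submodules (sketch);
# existing parameters and the real discovery logic are omitted/unchanged.
import logging

logger = logging.getLogger(__name__)


def collect_submodules(package, log=True, **kwargs):
    if log:
        logger.debug("Collecting submodules for %s", package)
    return []  # placeholder for the existing submodule discovery


# django_import_finder.py could then collect each app in INSTALLED_APPS quietly:
# hiddenimports += collect_submodules(app, log=False)
```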
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PyInstaller/hooks/rthooks/pyi_rth_django.py`
Content:
```
1 #-----------------------------------------------------------------------------
2 # Copyright (c) 2005-2022, PyInstaller Development Team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 #
7 # The full license is in the file COPYING.txt, distributed with this software.
8 #
9 # SPDX-License-Identifier: Apache-2.0
10 #-----------------------------------------------------------------------------
11
12 # This Django rthook was tested with Django 1.8.3.
13
14 import django.core.management
15 import django.utils.autoreload
16
17
18 def _get_commands():
19 # Django groupss commands by app. This returns static dict() as it is for django 1.8 and the default project.
20 commands = {
21 'changepassword': 'django.contrib.auth',
22 'check': 'django.core',
23 'clearsessions': 'django.contrib.sessions',
24 'collectstatic': 'django.contrib.staticfiles',
25 'compilemessages': 'django.core',
26 'createcachetable': 'django.core',
27 'createsuperuser': 'django.contrib.auth',
28 'dbshell': 'django.core',
29 'diffsettings': 'django.core',
30 'dumpdata': 'django.core',
31 'findstatic': 'django.contrib.staticfiles',
32 'flush': 'django.core',
33 'inspectdb': 'django.core',
34 'loaddata': 'django.core',
35 'makemessages': 'django.core',
36 'makemigrations': 'django.core',
37 'migrate': 'django.core',
38 'runfcgi': 'django.core',
39 'runserver': 'django.core',
40 'shell': 'django.core',
41 'showmigrations': 'django.core',
42 'sql': 'django.core',
43 'sqlall': 'django.core',
44 'sqlclear': 'django.core',
45 'sqlcustom': 'django.core',
46 'sqldropindexes': 'django.core',
47 'sqlflush': 'django.core',
48 'sqlindexes': 'django.core',
49 'sqlmigrate': 'django.core',
50 'sqlsequencereset': 'django.core',
51 'squashmigrations': 'django.core',
52 'startapp': 'django.core',
53 'startproject': 'django.core',
54 'syncdb': 'django.core',
55 'test': 'django.core',
56 'testserver': 'django.core',
57 'validate': 'django.core'
58 }
59 return commands
60
61
62 _old_restart_with_reloader = django.utils.autoreload.restart_with_reloader
63
64
65 def _restart_with_reloader(*args):
66 import sys
67 a0 = sys.argv.pop(0)
68 try:
69 return _old_restart_with_reloader(*args)
70 finally:
71 sys.argv.insert(0, a0)
72
73
74 # Override get_commands() function otherwise the app will complain that there are no commands.
75 django.core.management.get_commands = _get_commands
76 # Override restart_with_reloader() function, otherwise the app might complain that some commands do not exist;
77 # e.g., runserver.
78 django.utils.autoreload.restart_with_reloader = _restart_with_reloader
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/PyInstaller/hooks/rthooks/pyi_rth_django.py b/PyInstaller/hooks/rthooks/pyi_rth_django.py
--- a/PyInstaller/hooks/rthooks/pyi_rth_django.py
+++ b/PyInstaller/hooks/rthooks/pyi_rth_django.py
@@ -11,54 +11,8 @@
# This Django rthook was tested with Django 1.8.3.
-import django.core.management
import django.utils.autoreload
-
-def _get_commands():
- # Django groupss commands by app. This returns static dict() as it is for django 1.8 and the default project.
- commands = {
- 'changepassword': 'django.contrib.auth',
- 'check': 'django.core',
- 'clearsessions': 'django.contrib.sessions',
- 'collectstatic': 'django.contrib.staticfiles',
- 'compilemessages': 'django.core',
- 'createcachetable': 'django.core',
- 'createsuperuser': 'django.contrib.auth',
- 'dbshell': 'django.core',
- 'diffsettings': 'django.core',
- 'dumpdata': 'django.core',
- 'findstatic': 'django.contrib.staticfiles',
- 'flush': 'django.core',
- 'inspectdb': 'django.core',
- 'loaddata': 'django.core',
- 'makemessages': 'django.core',
- 'makemigrations': 'django.core',
- 'migrate': 'django.core',
- 'runfcgi': 'django.core',
- 'runserver': 'django.core',
- 'shell': 'django.core',
- 'showmigrations': 'django.core',
- 'sql': 'django.core',
- 'sqlall': 'django.core',
- 'sqlclear': 'django.core',
- 'sqlcustom': 'django.core',
- 'sqldropindexes': 'django.core',
- 'sqlflush': 'django.core',
- 'sqlindexes': 'django.core',
- 'sqlmigrate': 'django.core',
- 'sqlsequencereset': 'django.core',
- 'squashmigrations': 'django.core',
- 'startapp': 'django.core',
- 'startproject': 'django.core',
- 'syncdb': 'django.core',
- 'test': 'django.core',
- 'testserver': 'django.core',
- 'validate': 'django.core'
- }
- return commands
-
-
_old_restart_with_reloader = django.utils.autoreload.restart_with_reloader
@@ -71,8 +25,6 @@
sys.argv.insert(0, a0)
-# Override get_commands() function otherwise the app will complain that there are no commands.
-django.core.management.get_commands = _get_commands
# Override restart_with_reloader() function, otherwise the app might complain that some commands do not exist;
# e.g., runserver.
django.utils.autoreload.restart_with_reloader = _restart_with_reloader
| {"golden_diff": "diff --git a/PyInstaller/hooks/rthooks/pyi_rth_django.py b/PyInstaller/hooks/rthooks/pyi_rth_django.py\n--- a/PyInstaller/hooks/rthooks/pyi_rth_django.py\n+++ b/PyInstaller/hooks/rthooks/pyi_rth_django.py\n@@ -11,54 +11,8 @@\n \n # This Django rthook was tested with Django 1.8.3.\n \n-import django.core.management\n import django.utils.autoreload\n \n-\n-def _get_commands():\n- # Django groupss commands by app. This returns static dict() as it is for django 1.8 and the default project.\n- commands = {\n- 'changepassword': 'django.contrib.auth',\n- 'check': 'django.core',\n- 'clearsessions': 'django.contrib.sessions',\n- 'collectstatic': 'django.contrib.staticfiles',\n- 'compilemessages': 'django.core',\n- 'createcachetable': 'django.core',\n- 'createsuperuser': 'django.contrib.auth',\n- 'dbshell': 'django.core',\n- 'diffsettings': 'django.core',\n- 'dumpdata': 'django.core',\n- 'findstatic': 'django.contrib.staticfiles',\n- 'flush': 'django.core',\n- 'inspectdb': 'django.core',\n- 'loaddata': 'django.core',\n- 'makemessages': 'django.core',\n- 'makemigrations': 'django.core',\n- 'migrate': 'django.core',\n- 'runfcgi': 'django.core',\n- 'runserver': 'django.core',\n- 'shell': 'django.core',\n- 'showmigrations': 'django.core',\n- 'sql': 'django.core',\n- 'sqlall': 'django.core',\n- 'sqlclear': 'django.core',\n- 'sqlcustom': 'django.core',\n- 'sqldropindexes': 'django.core',\n- 'sqlflush': 'django.core',\n- 'sqlindexes': 'django.core',\n- 'sqlmigrate': 'django.core',\n- 'sqlsequencereset': 'django.core',\n- 'squashmigrations': 'django.core',\n- 'startapp': 'django.core',\n- 'startproject': 'django.core',\n- 'syncdb': 'django.core',\n- 'test': 'django.core',\n- 'testserver': 'django.core',\n- 'validate': 'django.core'\n- }\n- return commands\n-\n-\n _old_restart_with_reloader = django.utils.autoreload.restart_with_reloader\n \n \n@@ -71,8 +25,6 @@\n sys.argv.insert(0, a0)\n \n \n-# Override get_commands() function otherwise the app will complain that there are no commands.\n-django.core.management.get_commands = _get_commands\n # Override restart_with_reloader() function, otherwise the app might complain that some commands do not exist;\n # e.g., runserver.\n django.utils.autoreload.restart_with_reloader = _restart_with_reloader\n", "issue": "Fixes for use of pyinstaller with Django 4.x and custom management commands.\nPROBLEM:\r\nThis feature aims to solve the problem of the custom app level management commands being missed out from hidden imports alongside issues with imports of apps listed within INSTALLED_APPS failing due to erroneous execution of 'eval_script' function. Specifically when the hidden imports of the INSTALLED_APPS are evaluated the logging outputs generated by 'collect_submodules' when called in django_import_finder.py are captured in the STDOUT regardless of the --log-level. Also any additional management commands provided by one of the INSTALLED_APPS are ignored as the 'get_commands' function has a hardcoded referenced to Django 1.8 command set. Django's currently implementation of command collection will not complain of missing commands at runtime thereby rendering the patch of this function that is currently implemented irrelevant.\r\n\r\nSOLUTION:\r\nThe solution to this issue is to remove several redundant parts of the code alongside adding additional overrides for decluttering STDOUT. 
\r\n\r\nThe following is a list of measures taken to resolve the problem\r\n- remove the monkey patching of Django's 'get_commands' method in pyi_rth_django.py\r\n- modify the collect static code to have a boolean input parameter 'log' which when the relevant calls to logging within this function are wrapped in a conditional will serve to prevent logs being inappropriately raised.\r\n\n", "before_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: Apache-2.0\n#-----------------------------------------------------------------------------\n\n# This Django rthook was tested with Django 1.8.3.\n\nimport django.core.management\nimport django.utils.autoreload\n\n\ndef _get_commands():\n # Django groupss commands by app. This returns static dict() as it is for django 1.8 and the default project.\n commands = {\n 'changepassword': 'django.contrib.auth',\n 'check': 'django.core',\n 'clearsessions': 'django.contrib.sessions',\n 'collectstatic': 'django.contrib.staticfiles',\n 'compilemessages': 'django.core',\n 'createcachetable': 'django.core',\n 'createsuperuser': 'django.contrib.auth',\n 'dbshell': 'django.core',\n 'diffsettings': 'django.core',\n 'dumpdata': 'django.core',\n 'findstatic': 'django.contrib.staticfiles',\n 'flush': 'django.core',\n 'inspectdb': 'django.core',\n 'loaddata': 'django.core',\n 'makemessages': 'django.core',\n 'makemigrations': 'django.core',\n 'migrate': 'django.core',\n 'runfcgi': 'django.core',\n 'runserver': 'django.core',\n 'shell': 'django.core',\n 'showmigrations': 'django.core',\n 'sql': 'django.core',\n 'sqlall': 'django.core',\n 'sqlclear': 'django.core',\n 'sqlcustom': 'django.core',\n 'sqldropindexes': 'django.core',\n 'sqlflush': 'django.core',\n 'sqlindexes': 'django.core',\n 'sqlmigrate': 'django.core',\n 'sqlsequencereset': 'django.core',\n 'squashmigrations': 'django.core',\n 'startapp': 'django.core',\n 'startproject': 'django.core',\n 'syncdb': 'django.core',\n 'test': 'django.core',\n 'testserver': 'django.core',\n 'validate': 'django.core'\n }\n return commands\n\n\n_old_restart_with_reloader = django.utils.autoreload.restart_with_reloader\n\n\ndef _restart_with_reloader(*args):\n import sys\n a0 = sys.argv.pop(0)\n try:\n return _old_restart_with_reloader(*args)\n finally:\n sys.argv.insert(0, a0)\n\n\n# Override get_commands() function otherwise the app will complain that there are no commands.\ndjango.core.management.get_commands = _get_commands\n# Override restart_with_reloader() function, otherwise the app might complain that some commands do not exist;\n# e.g., runserver.\ndjango.utils.autoreload.restart_with_reloader = _restart_with_reloader\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_django.py"}], "after_files": [{"content": "#-----------------------------------------------------------------------------\n# Copyright (c) 2005-2022, PyInstaller Development Team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#\n# SPDX-License-Identifier: 
Apache-2.0\n#-----------------------------------------------------------------------------\n\n# This Django rthook was tested with Django 1.8.3.\n\nimport django.utils.autoreload\n\n_old_restart_with_reloader = django.utils.autoreload.restart_with_reloader\n\n\ndef _restart_with_reloader(*args):\n import sys\n a0 = sys.argv.pop(0)\n try:\n return _old_restart_with_reloader(*args)\n finally:\n sys.argv.insert(0, a0)\n\n\n# Override restart_with_reloader() function, otherwise the app might complain that some commands do not exist;\n# e.g., runserver.\ndjango.utils.autoreload.restart_with_reloader = _restart_with_reloader\n", "path": "PyInstaller/hooks/rthooks/pyi_rth_django.py"}]} | 1,349 | 663 |
gh_patches_debug_24655 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-3217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make entry_points behave the same across Python versions
The recently introduced `entry_points` function does not behave the same across Python versions and it is not possible to get all entry points in Python 3.8 and 3.9.
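For illustration, a minimal sketch of the version skew being described (editor's example; the entry-point group name is taken from the package's own setup.py snippet):

```python
# On 3.10+ (and the importlib_metadata backport) entry_points() accepts
# selection keywords; the 3.8/3.9 stdlib version takes no arguments and
# returns a plain dict keyed by group.
import sys
from importlib.metadata import entry_points

GROUP = "opentelemetry_tracer_provider"

if sys.version_info >= (3, 10):
    eps = entry_points(group=GROUP)
else:
    eps = entry_points().get(GROUP, [])  # no way to select by group and name

print([ep.name for ep in eps])
```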
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from sys import version_info
16
17 # FIXME remove this when support for 3.7 is dropped.
18 if version_info.minor == 7:
19 # pylint: disable=import-error
20 from importlib_metadata import entry_points, version # type: ignore
21
22 # FIXME remove this file when support for 3.9 is dropped.
23 elif version_info.minor in (8, 9):
24 # pylint: disable=import-error
25 from importlib.metadata import (
26 entry_points as importlib_metadata_entry_points,
27 )
28 from importlib.metadata import version
29
30 def entry_points(group: str, name: str): # type: ignore
31 for entry_point in importlib_metadata_entry_points()[group]:
32 if entry_point.name == name:
33 yield entry_point
34
35 else:
36 from importlib.metadata import entry_points, version
37
38 __all__ = ["entry_points", "version"]
39
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py b/opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py
--- a/opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py
+++ b/opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py
@@ -12,27 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-from sys import version_info
+# FIXME: Use importlib.metadata when support for 3.11 is dropped if the rest of
+# the supported versions at that time have the same API.
+from importlib_metadata import ( # type: ignore
+ EntryPoint,
+ EntryPoints,
+ entry_points,
+ version,
+)
-# FIXME remove this when support for 3.7 is dropped.
-if version_info.minor == 7:
- # pylint: disable=import-error
- from importlib_metadata import entry_points, version # type: ignore
+# The importlib-metadata library has introduced breaking changes before to its
+# API, this module is kept just to act as a layer between the
+# importlib-metadata library and our project if in any case it is necessary to
+# do so.
-# FIXME remove this file when support for 3.9 is dropped.
-elif version_info.minor in (8, 9):
- # pylint: disable=import-error
- from importlib.metadata import (
- entry_points as importlib_metadata_entry_points,
- )
- from importlib.metadata import version
-
- def entry_points(group: str, name: str): # type: ignore
- for entry_point in importlib_metadata_entry_points()[group]:
- if entry_point.name == name:
- yield entry_point
-
-else:
- from importlib.metadata import entry_points, version
-
-__all__ = ["entry_points", "version"]
+__all__ = ["entry_points", "version", "EntryPoint", "EntryPoints"]
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py b/opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py\n--- a/opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py\n+++ b/opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py\n@@ -12,27 +12,18 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n-from sys import version_info\n+# FIXME: Use importlib.metadata when support for 3.11 is dropped if the rest of\n+# the supported versions at that time have the same API.\n+from importlib_metadata import ( # type: ignore\n+ EntryPoint,\n+ EntryPoints,\n+ entry_points,\n+ version,\n+)\n \n-# FIXME remove this when support for 3.7 is dropped.\n-if version_info.minor == 7:\n- # pylint: disable=import-error\n- from importlib_metadata import entry_points, version # type: ignore\n+# The importlib-metadata library has introduced breaking changes before to its\n+# API, this module is kept just to act as a layer between the\n+# importlib-metadata library and our project if in any case it is necessary to\n+# do so.\n \n-# FIXME remove this file when support for 3.9 is dropped.\n-elif version_info.minor in (8, 9):\n- # pylint: disable=import-error\n- from importlib.metadata import (\n- entry_points as importlib_metadata_entry_points,\n- )\n- from importlib.metadata import version\n-\n- def entry_points(group: str, name: str): # type: ignore\n- for entry_point in importlib_metadata_entry_points()[group]:\n- if entry_point.name == name:\n- yield entry_point\n-\n-else:\n- from importlib.metadata import entry_points, version\n-\n-__all__ = [\"entry_points\", \"version\"]\n+__all__ = [\"entry_points\", \"version\", \"EntryPoint\", \"EntryPoints\"]\n", "issue": "Make entry_points behave the same across Python versions\nThe recently introduced `entry_points` function does not behave the same across Python versions and it is not possible to get all entry points in Python 3.8 and 3.9.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom sys import version_info\n\n# FIXME remove this when support for 3.7 is dropped.\nif version_info.minor == 7:\n # pylint: disable=import-error\n from importlib_metadata import entry_points, version # type: ignore\n\n# FIXME remove this file when support for 3.9 is dropped.\nelif version_info.minor in (8, 9):\n # pylint: disable=import-error\n from importlib.metadata import (\n entry_points as importlib_metadata_entry_points,\n )\n from importlib.metadata import version\n\n def entry_points(group: str, name: str): # type: ignore\n for entry_point in importlib_metadata_entry_points()[group]:\n if entry_point.name == name:\n yield entry_point\n\nelse:\n from importlib.metadata import entry_points, version\n\n__all__ = [\"entry_points\", \"version\"]\n", "path": "opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py"}], "after_files": [{"content": "# Copyright The 
OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# FIXME: Use importlib.metadata when support for 3.11 is dropped if the rest of\n# the supported versions at that time have the same API.\nfrom importlib_metadata import ( # type: ignore\n EntryPoint,\n EntryPoints,\n entry_points,\n version,\n)\n\n# The importlib-metadata library has introduced breaking changes before to its\n# API, this module is kept just to act as a layer between the\n# importlib-metadata library and our project if in any case it is necessary to\n# do so.\n\n__all__ = [\"entry_points\", \"version\", \"EntryPoint\", \"EntryPoints\"]\n", "path": "opentelemetry-api/src/opentelemetry/util/_importlib_metadata.py"}]} | 704 | 445 |
gh_patches_debug_3318 | rasdani/github-patches | git_diff | feast-dev__feast-2753 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to access data in Feast UI when deployed to remote instance
## Expected Behavior
Should be able to view registry data when launching UI with `feast ui` on remote instances (like EC2).
## Current Behavior
I’ve tried setting the host to `0.0.0.0` and the static assets get loaded and can be accessed via the public IP. But the requests to the registry (`http://0.0.0.0:8888/registry`) fail, so no data shows up.
I've also tried setting the host to the private IP, but the request to `/registry` times out.
## Steps to reproduce
Run `feast ui --host <instance private ip>` in EC2 instance.
### Specifications
- Version:`0.21.2`
- Platform: EC2
- Subsystem:
## Possible Solution
Potential CORS issue that needs to be fixed?
Unable to access data in Feast UI when deployed to remote instance
## Expected Behavior
Should be able to view registry data when launching UI with `feast ui` on remote instances (like EC2).
## Current Behavior
I’ve tried setting the host to `0.0.0.0` and the static assets get loaded and can be accessed via the public IP. But the requests to the registry (`http://0.0.0.0:8888/registry`) fail, so no data shows up.
I've also tried setting the host to the private IP, but the request to `/registry` times out.
## Steps to reproduce
Run `feast ui --host <instance private ip>` in EC2 instance.
### Specifications
- Version:`0.21.2`
- Platform: EC2
- Subsystem:
## Possible Solution
Potential CORS issue that needs to be fixed?
--- END ISSUE ---
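As background, a small standalone sketch (not Feast code) of why an absolute URL built from the bind address cannot work from a remote browser, while a path relative to the page origin can; the public IP below is a documentation placeholder:
```
from urllib.parse import urljoin

bind_host, port = "0.0.0.0", 8888
page_origin = "http://203.0.113.10:8888/"  # whatever host actually served the UI

absolute = f"http://{bind_host}:{port}/registry"  # 0.0.0.0 is only a bind address, not reachable
relative = urljoin(page_origin, "/registry")      # resolves against the serving host

print(absolute)  # http://0.0.0.0:8888/registry
print(relative)  # http://203.0.113.10:8888/registry
```
This matches the direction of the patch later in this record, which stores `registryPath` as the relative `/registry` instead of interpolating the host.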
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/ui_server.py`
Content:
```
1 import json
2 import threading
3 from typing import Callable, Optional
4
5 import pkg_resources
6 import uvicorn
7 from fastapi import FastAPI, Response
8 from fastapi.middleware.cors import CORSMiddleware
9 from fastapi.staticfiles import StaticFiles
10
11 import feast
12
13
14 def get_app(
15 store: "feast.FeatureStore",
16 get_registry_dump: Callable,
17 project_id: str,
18 registry_ttl_secs: int,
19 host: str,
20 port: int,
21 ):
22 app = FastAPI()
23
24 app.add_middleware(
25 CORSMiddleware,
26 allow_origins=["*"],
27 allow_credentials=True,
28 allow_methods=["*"],
29 allow_headers=["*"],
30 )
31
32 # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down
33 registry_json = ""
34 shutting_down = False
35 active_timer: Optional[threading.Timer] = None
36
37 def async_refresh():
38 store.refresh_registry()
39 nonlocal registry_json
40 registry_json = get_registry_dump(store.config, store.repo_path)
41 if shutting_down:
42 return
43 nonlocal active_timer
44 active_timer = threading.Timer(registry_ttl_secs, async_refresh)
45 active_timer.start()
46
47 @app.on_event("shutdown")
48 def shutdown_event():
49 nonlocal shutting_down
50 shutting_down = True
51 if active_timer:
52 active_timer.cancel()
53
54 async_refresh()
55
56 ui_dir = pkg_resources.resource_filename(__name__, "ui/build/")
57 # Initialize with the projects-list.json file
58 with open(ui_dir + "projects-list.json", mode="w") as f:
59 projects_dict = {
60 "projects": [
61 {
62 "name": "Project",
63 "description": "Test project",
64 "id": project_id,
65 "registryPath": f"http://{host}:{port}/registry",
66 }
67 ]
68 }
69 f.write(json.dumps(projects_dict))
70
71 @app.get("/registry")
72 def read_registry():
73 return json.loads(registry_json)
74
75 # For all other paths (such as paths that would otherwise be handled by react router), pass to React
76 @app.api_route("/p/{path_name:path}", methods=["GET"])
77 def catch_all():
78 filename = ui_dir + "index.html"
79
80 with open(filename) as f:
81 content = f.read()
82
83 return Response(content, media_type="text/html")
84
85 app.mount(
86 "/", StaticFiles(directory=ui_dir, html=True), name="site",
87 )
88
89 return app
90
91
92 def start_server(
93 store: "feast.FeatureStore",
94 host: str,
95 port: int,
96 get_registry_dump: Callable,
97 project_id: str,
98 registry_ttl_sec: int,
99 ):
100 app = get_app(store, get_registry_dump, project_id, registry_ttl_sec, host, port)
101 uvicorn.run(app, host=host, port=port)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py
--- a/sdk/python/feast/ui_server.py
+++ b/sdk/python/feast/ui_server.py
@@ -62,7 +62,7 @@
"name": "Project",
"description": "Test project",
"id": project_id,
- "registryPath": f"http://{host}:{port}/registry",
+ "registryPath": "/registry",
}
]
}
| {"golden_diff": "diff --git a/sdk/python/feast/ui_server.py b/sdk/python/feast/ui_server.py\n--- a/sdk/python/feast/ui_server.py\n+++ b/sdk/python/feast/ui_server.py\n@@ -62,7 +62,7 @@\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n- \"registryPath\": f\"http://{host}:{port}/registry\",\n+ \"registryPath\": \"/registry\",\n }\n ]\n }\n", "issue": "Unable to access data in Feast UI when deployed to remote instance\n## Expected Behavior \r\nShould be able to view registry data when launching UI with `feast ui` on remote instances (like EC2).\r\n\r\n## Current Behavior\r\nI\u2019ve tried setting the host to `0.0.0.0` and the static assets get loaded and can accessed via the public IP. But the requests to the registry (`http://0.0.0.0:8888/registry`) fails, so no data shows up.\r\n\r\nI've also tried setting the host to the private IP, but the request to `/registry` times out.\r\n\r\n## Steps to reproduce\r\nRun `feast ui --host <instance private ip>` in EC2 instance.\r\n\r\n### Specifications\r\n\r\n- Version:`0.21.2`\r\n- Platform: EC2\r\n- Subsystem:\r\n\r\n## Possible Solution\r\nPotential CORS issue that needs to be fixed?\nUnable to access data in Feast UI when deployed to remote instance\n## Expected Behavior \r\nShould be able to view registry data when launching UI with `feast ui` on remote instances (like EC2).\r\n\r\n## Current Behavior\r\nI\u2019ve tried setting the host to `0.0.0.0` and the static assets get loaded and can accessed via the public IP. But the requests to the registry (`http://0.0.0.0:8888/registry`) fails, so no data shows up.\r\n\r\nI've also tried setting the host to the private IP, but the request to `/registry` times out.\r\n\r\n## Steps to reproduce\r\nRun `feast ui --host <instance private ip>` in EC2 instance.\r\n\r\n### Specifications\r\n\r\n- Version:`0.21.2`\r\n- Platform: EC2\r\n- Subsystem:\r\n\r\n## Possible Solution\r\nPotential CORS issue that needs to be fixed?\n", "before_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport pkg_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_secs: int,\n host: str,\n port: int,\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down\n registry_json = \"\"\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_json\n registry_json = get_registry_dump(store.config, store.repo_path)\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir = pkg_resources.resource_filename(__name__, \"ui/build/\")\n # Initialize with the projects-list.json file\n with open(ui_dir + \"projects-list.json\", mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": 
project_id,\n \"registryPath\": f\"http://{host}:{port}/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return json.loads(registry_json)\n\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir + \"index.html\"\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\", StaticFiles(directory=ui_dir, html=True), name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n):\n app = get_app(store, get_registry_dump, project_id, registry_ttl_sec, host, port)\n uvicorn.run(app, host=host, port=port)\n", "path": "sdk/python/feast/ui_server.py"}], "after_files": [{"content": "import json\nimport threading\nfrom typing import Callable, Optional\n\nimport pkg_resources\nimport uvicorn\nfrom fastapi import FastAPI, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.staticfiles import StaticFiles\n\nimport feast\n\n\ndef get_app(\n store: \"feast.FeatureStore\",\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_secs: int,\n host: str,\n port: int,\n):\n app = FastAPI()\n\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n # Asynchronously refresh registry, notifying shutdown and canceling the active timer if the app is shutting down\n registry_json = \"\"\n shutting_down = False\n active_timer: Optional[threading.Timer] = None\n\n def async_refresh():\n store.refresh_registry()\n nonlocal registry_json\n registry_json = get_registry_dump(store.config, store.repo_path)\n if shutting_down:\n return\n nonlocal active_timer\n active_timer = threading.Timer(registry_ttl_secs, async_refresh)\n active_timer.start()\n\n @app.on_event(\"shutdown\")\n def shutdown_event():\n nonlocal shutting_down\n shutting_down = True\n if active_timer:\n active_timer.cancel()\n\n async_refresh()\n\n ui_dir = pkg_resources.resource_filename(__name__, \"ui/build/\")\n # Initialize with the projects-list.json file\n with open(ui_dir + \"projects-list.json\", mode=\"w\") as f:\n projects_dict = {\n \"projects\": [\n {\n \"name\": \"Project\",\n \"description\": \"Test project\",\n \"id\": project_id,\n \"registryPath\": \"/registry\",\n }\n ]\n }\n f.write(json.dumps(projects_dict))\n\n @app.get(\"/registry\")\n def read_registry():\n return json.loads(registry_json)\n\n # For all other paths (such as paths that would otherwise be handled by react router), pass to React\n @app.api_route(\"/p/{path_name:path}\", methods=[\"GET\"])\n def catch_all():\n filename = ui_dir + \"index.html\"\n\n with open(filename) as f:\n content = f.read()\n\n return Response(content, media_type=\"text/html\")\n\n app.mount(\n \"/\", StaticFiles(directory=ui_dir, html=True), name=\"site\",\n )\n\n return app\n\n\ndef start_server(\n store: \"feast.FeatureStore\",\n host: str,\n port: int,\n get_registry_dump: Callable,\n project_id: str,\n registry_ttl_sec: int,\n):\n app = get_app(store, get_registry_dump, project_id, registry_ttl_sec, host, port)\n uvicorn.run(app, host=host, port=port)\n", "path": "sdk/python/feast/ui_server.py"}]} | 1,454 | 108 |
gh_patches_debug_24195 | rasdani/github-patches | git_diff | liqd__a4-meinberlin-2543 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
testing #2252 follow mail: mail participation ends soon
Where: Mail "participation ends soon"
* In a single-module project, the link should go to the project view and not to a module view, which does not normally exist in this case. Is that possible?
* As in the other mails: a paragraph break between the two sentences would probably look better.
--- END ISSUE ---
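For context, a hedged standalone sketch (plain functions, not the Django models involved) of the linking behaviour being asked for: fall back to the project URL when there is no timeline to deep-link into. The `?initialSlide=` parameter mirrors the patch at the end of this record.
```
def offline_event_url(project_url, display_timeline, slide_index=0):
    """Link target for an offline event; assumes the viewer accepts ?initialSlide=."""
    if display_timeline:
        return '{}?initialSlide={}'.format(project_url, slide_index)
    return project_url


print(offline_event_url('/projects/demo/', display_timeline=False))               # /projects/demo/
print(offline_event_url('/projects/demo/', display_timeline=True, slide_index=2))  # /projects/demo/?initialSlide=2
```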
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/offlineevents/models.py`
Content:
```
1 from datetime import timedelta
2
3 from autoslug import AutoSlugField
4 from ckeditor_uploader.fields import RichTextUploadingField
5 from django.db import models
6 from django.urls import reverse
7 from django.utils import timezone
8 from django.utils.translation import ugettext_lazy as _
9
10 from adhocracy4 import transforms
11 from adhocracy4.models.base import UserGeneratedContentModel
12 from adhocracy4.projects import models as project_models
13
14
15 class OfflineEventsQuerySet(models.QuerySet):
16
17 def starts_within(self, hours=72):
18 """All offlineevents starting within the given time."""
19 now = timezone.now()
20 return self.filter(date__gt=now,
21 date__lt=(now + timedelta(hours=hours)))
22
23
24 class OfflineEvent(UserGeneratedContentModel):
25 slug = AutoSlugField(populate_from='name', unique=True)
26 name = models.CharField(max_length=120, verbose_name=_('Name of event'))
27 event_type = models.CharField(
28 max_length=30, verbose_name=_('Event type'),
29 help_text=_('The content of this field is shown in the timeline. It '
30 'should have no more than 30 characters e.g. Information '
31 'event or 3rd public workshop.'))
32 date = models.DateTimeField(
33 verbose_name=_('Date'))
34 description = RichTextUploadingField(
35 config_name='image-editor',
36 verbose_name=_('Description'))
37 project = models.ForeignKey(
38 project_models.Project, on_delete=models.CASCADE)
39
40 objects = OfflineEventsQuerySet.as_manager()
41
42 class Meta:
43 ordering = ['-date']
44
45 def __str__(self):
46 return self.name
47
48 def save(self, *args, **kwargs):
49 self.description = transforms.clean_html_field(
50 self.description, 'image-editor')
51 super().save(*args, **kwargs)
52
53 def get_absolute_url(self):
54 return reverse('meinberlin_offlineevents:offlineevent-detail',
55 args=[str(self.slug)])
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/meinberlin/apps/offlineevents/models.py b/meinberlin/apps/offlineevents/models.py
--- a/meinberlin/apps/offlineevents/models.py
+++ b/meinberlin/apps/offlineevents/models.py
@@ -3,8 +3,8 @@
from autoslug import AutoSlugField
from ckeditor_uploader.fields import RichTextUploadingField
from django.db import models
-from django.urls import reverse
from django.utils import timezone
+from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from adhocracy4 import transforms
@@ -50,6 +50,16 @@
self.description, 'image-editor')
super().save(*args, **kwargs)
+ @cached_property
+ def get_timeline_index(self):
+ if self.project.display_timeline:
+ for count, cluster in enumerate(self.project.participation_dates):
+ if 'event_type' in cluster and self.slug == cluster['slug']:
+ return count
+ return 0
+
def get_absolute_url(self):
- return reverse('meinberlin_offlineevents:offlineevent-detail',
- args=[str(self.slug)])
+ if self.project.display_timeline:
+ return '{}?initialSlide={}'.format(self.project.get_absolute_url(),
+ self.get_timeline_index)
+ return self.project.get_absolute_url()
| {"golden_diff": "diff --git a/meinberlin/apps/offlineevents/models.py b/meinberlin/apps/offlineevents/models.py\n--- a/meinberlin/apps/offlineevents/models.py\n+++ b/meinberlin/apps/offlineevents/models.py\n@@ -3,8 +3,8 @@\n from autoslug import AutoSlugField\n from ckeditor_uploader.fields import RichTextUploadingField\n from django.db import models\n-from django.urls import reverse\n from django.utils import timezone\n+from django.utils.functional import cached_property\n from django.utils.translation import ugettext_lazy as _\n \n from adhocracy4 import transforms\n@@ -50,6 +50,16 @@\n self.description, 'image-editor')\n super().save(*args, **kwargs)\n \n+ @cached_property\n+ def get_timeline_index(self):\n+ if self.project.display_timeline:\n+ for count, cluster in enumerate(self.project.participation_dates):\n+ if 'event_type' in cluster and self.slug == cluster['slug']:\n+ return count\n+ return 0\n+\n def get_absolute_url(self):\n- return reverse('meinberlin_offlineevents:offlineevent-detail',\n- args=[str(self.slug)])\n+ if self.project.display_timeline:\n+ return '{}?initialSlide={}'.format(self.project.get_absolute_url(),\n+ self.get_timeline_index)\n+ return self.project.get_absolute_url()\n", "issue": "testing #2252 follow mail: mail participation ends soon\nWhere: Mail \"participation ends soon\"\r\n\r\n* in a single module project link should go to project view and not to a module view that does not regularly exist in this case. Is that possible?\r\n* As in the other mails: paragraph between two sentences probably looks better.\n", "before_files": [{"content": "from datetime import timedelta\n\nfrom autoslug import AutoSlugField\nfrom ckeditor_uploader.fields import RichTextUploadingField\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.projects import models as project_models\n\n\nclass OfflineEventsQuerySet(models.QuerySet):\n\n def starts_within(self, hours=72):\n \"\"\"All offlineevents starting within the given time.\"\"\"\n now = timezone.now()\n return self.filter(date__gt=now,\n date__lt=(now + timedelta(hours=hours)))\n\n\nclass OfflineEvent(UserGeneratedContentModel):\n slug = AutoSlugField(populate_from='name', unique=True)\n name = models.CharField(max_length=120, verbose_name=_('Name of event'))\n event_type = models.CharField(\n max_length=30, verbose_name=_('Event type'),\n help_text=_('The content of this field is shown in the timeline. It '\n 'should have no more than 30 characters e.g. 
Information '\n 'event or 3rd public workshop.'))\n date = models.DateTimeField(\n verbose_name=_('Date'))\n description = RichTextUploadingField(\n config_name='image-editor',\n verbose_name=_('Description'))\n project = models.ForeignKey(\n project_models.Project, on_delete=models.CASCADE)\n\n objects = OfflineEventsQuerySet.as_manager()\n\n class Meta:\n ordering = ['-date']\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(\n self.description, 'image-editor')\n super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse('meinberlin_offlineevents:offlineevent-detail',\n args=[str(self.slug)])\n", "path": "meinberlin/apps/offlineevents/models.py"}], "after_files": [{"content": "from datetime import timedelta\n\nfrom autoslug import AutoSlugField\nfrom ckeditor_uploader.fields import RichTextUploadingField\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom adhocracy4 import transforms\nfrom adhocracy4.models.base import UserGeneratedContentModel\nfrom adhocracy4.projects import models as project_models\n\n\nclass OfflineEventsQuerySet(models.QuerySet):\n\n def starts_within(self, hours=72):\n \"\"\"All offlineevents starting within the given time.\"\"\"\n now = timezone.now()\n return self.filter(date__gt=now,\n date__lt=(now + timedelta(hours=hours)))\n\n\nclass OfflineEvent(UserGeneratedContentModel):\n slug = AutoSlugField(populate_from='name', unique=True)\n name = models.CharField(max_length=120, verbose_name=_('Name of event'))\n event_type = models.CharField(\n max_length=30, verbose_name=_('Event type'),\n help_text=_('The content of this field is shown in the timeline. It '\n 'should have no more than 30 characters e.g. Information '\n 'event or 3rd public workshop.'))\n date = models.DateTimeField(\n verbose_name=_('Date'))\n description = RichTextUploadingField(\n config_name='image-editor',\n verbose_name=_('Description'))\n project = models.ForeignKey(\n project_models.Project, on_delete=models.CASCADE)\n\n objects = OfflineEventsQuerySet.as_manager()\n\n class Meta:\n ordering = ['-date']\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.description = transforms.clean_html_field(\n self.description, 'image-editor')\n super().save(*args, **kwargs)\n\n @cached_property\n def get_timeline_index(self):\n if self.project.display_timeline:\n for count, cluster in enumerate(self.project.participation_dates):\n if 'event_type' in cluster and self.slug == cluster['slug']:\n return count\n return 0\n\n def get_absolute_url(self):\n if self.project.display_timeline:\n return '{}?initialSlide={}'.format(self.project.get_absolute_url(),\n self.get_timeline_index)\n return self.project.get_absolute_url()\n", "path": "meinberlin/apps/offlineevents/models.py"}]} | 850 | 298 |
gh_patches_debug_22114 | rasdani/github-patches | git_diff | deepchecks__deepchecks-550 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Receiving FutureWarning for each label on Calibration Score check
**Describe the bug**
Receiving FutureWarning for each label on Calibration Score
**To Reproduce**
Run a categorical Dataset on the Calibration Score check
**Expected behavior**
No warnings
**Screenshots**

**Environment (please complete the following information):**
- OS: mac
- Python Version: 3.8
- Deepchecks Version: 0.2.1
--- END ISSUE ---
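For reference, a minimal standalone sketch; the labels are examples, the warning is assumed to be tied to the `pos_label` argument (which is what the fix later in this record removes), and the exact warning text depends on the installed scikit-learn version:
```
import numpy as np
from sklearn.metrics import brier_score_loss

y_true = np.array(["cat", "dog", "cat", "dog"])
proba_dog = np.array([0.1, 0.8, 0.3, 0.7])

# The check builds one such call per class; the pos_label-based form it used
# (left commented out here) is the one associated with the warnings:
# brier_score_loss(y_true == "dog", proba_dog, pos_label="dog")

# Boolean-target form: the positive class is simply True, so no pos_label is needed.
print(brier_score_loss(y_true == "dog", proba_dog))  # 0.0575
```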
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/checks/performance/calibration_score.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """The calibration score check module."""
12 from sklearn.base import BaseEstimator
13 from sklearn.calibration import calibration_curve
14 from sklearn.metrics import brier_score_loss
15 import plotly.graph_objects as go
16
17 from deepchecks import Dataset, CheckResult, SingleDatasetBaseCheck
18 from deepchecks.utils.metrics import ModelType, task_type_validation
19
20
21 __all__ = ['CalibrationScore']
22
23
24 class CalibrationScore(SingleDatasetBaseCheck):
25 """Calculate the calibration curve with brier score for each class."""
26
27 def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:
28 """Run check.
29
30 Args:
31 model (BaseEstimator): A scikit-learn-compatible fitted estimator instance
32 dataset: a Dataset object
33 Returns:
34 CheckResult: value is dictionary of class and it's brier score, displays the calibration curve
35 graph with each class
36
37 Raises:
38 DeepchecksValueError: If the object is not a Dataset instance with a label
39 """
40 return self._calibration_score(dataset, model)
41
42 def _calibration_score(self, dataset: Dataset, model):
43 Dataset.validate_dataset(dataset)
44 dataset.validate_label()
45 task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])
46
47 ds_x = dataset.features_columns
48 ds_y = dataset.label_col
49 # Expect predict_proba to return in order of the sorted classes.
50 y_pred = model.predict_proba(ds_x)
51
52 briers_scores = {}
53
54 if len(dataset.classes) == 2:
55 briers_scores[0] = brier_score_loss(ds_y, y_pred[:, 1], pos_label=dataset.classes[1])
56 else:
57 for class_index, class_name in enumerate(dataset.classes):
58 prob_pos = y_pred[:, class_index]
59 clf_score = brier_score_loss(ds_y == class_name, prob_pos, pos_label=class_name)
60 briers_scores[class_name] = clf_score
61
62 fig = go.Figure()
63
64 fig.add_trace(go.Scatter(
65 x=[0, 1],
66 y=[0, 1],
67 line_width=2, line_dash='dash',
68 name='Perfectly calibrated',
69 ))
70
71 if len(dataset.classes) == 2:
72 fraction_of_positives, mean_predicted_value = calibration_curve(ds_y, y_pred[:, 1], n_bins=10)
73
74 fig.add_trace(go.Scatter(
75 x=mean_predicted_value,
76 y=fraction_of_positives,
77 mode='lines+markers',
78 name=f'(brier:{briers_scores[0]:9.4f})',
79 ))
80 else:
81 for class_index, class_name in enumerate(dataset.classes):
82 prob_pos = y_pred[:, class_index]
83
84 fraction_of_positives, mean_predicted_value = \
85 calibration_curve(ds_y == class_name, prob_pos, n_bins=10)
86
87 fig.add_trace(go.Scatter(
88 x=mean_predicted_value,
89 y=fraction_of_positives,
90 mode='lines+markers',
91 name=f'{class_name} (brier:{briers_scores[class_name]:9.4f})',
92 ))
93
94 fig.update_layout(title_text='Calibration plots (reliability curve)',
95 width=700, height=500)
96 fig.update_yaxes(title='Fraction of positives')
97 fig.update_xaxes(title='Mean predicted value')
98
99 calibration_text = 'Calibration curves (also known as reliability diagrams) compare how well the ' \
100 'probabilistic predictions of a binary classifier are calibrated. It plots the true ' \
101 'frequency of the positive label against its predicted probability, for binned predictions.'
102 brier_text = 'The Brier score metric may be used to assess how well a classifier is calibrated. For more ' \
103 'info, please visit https://en.wikipedia.org/wiki/Brier_score'
104 return CheckResult(briers_scores, header='Calibration Metric',
105 display=[calibration_text, fig, brier_text])
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/deepchecks/checks/performance/calibration_score.py b/deepchecks/checks/performance/calibration_score.py
--- a/deepchecks/checks/performance/calibration_score.py
+++ b/deepchecks/checks/performance/calibration_score.py
@@ -17,7 +17,6 @@
from deepchecks import Dataset, CheckResult, SingleDatasetBaseCheck
from deepchecks.utils.metrics import ModelType, task_type_validation
-
__all__ = ['CalibrationScore']
@@ -52,11 +51,11 @@
briers_scores = {}
if len(dataset.classes) == 2:
- briers_scores[0] = brier_score_loss(ds_y, y_pred[:, 1], pos_label=dataset.classes[1])
+ briers_scores[0] = brier_score_loss(ds_y == dataset.classes[1], y_pred[:, 1])
else:
for class_index, class_name in enumerate(dataset.classes):
prob_pos = y_pred[:, class_index]
- clf_score = brier_score_loss(ds_y == class_name, prob_pos, pos_label=class_name)
+ clf_score = brier_score_loss(ds_y == class_name, prob_pos)
briers_scores[class_name] = clf_score
fig = go.Figure()
| {"golden_diff": "diff --git a/deepchecks/checks/performance/calibration_score.py b/deepchecks/checks/performance/calibration_score.py\n--- a/deepchecks/checks/performance/calibration_score.py\n+++ b/deepchecks/checks/performance/calibration_score.py\n@@ -17,7 +17,6 @@\n from deepchecks import Dataset, CheckResult, SingleDatasetBaseCheck\n from deepchecks.utils.metrics import ModelType, task_type_validation\n \n-\n __all__ = ['CalibrationScore']\n \n \n@@ -52,11 +51,11 @@\n briers_scores = {}\n \n if len(dataset.classes) == 2:\n- briers_scores[0] = brier_score_loss(ds_y, y_pred[:, 1], pos_label=dataset.classes[1])\n+ briers_scores[0] = brier_score_loss(ds_y == dataset.classes[1], y_pred[:, 1])\n else:\n for class_index, class_name in enumerate(dataset.classes):\n prob_pos = y_pred[:, class_index]\n- clf_score = brier_score_loss(ds_y == class_name, prob_pos, pos_label=class_name)\n+ clf_score = brier_score_loss(ds_y == class_name, prob_pos)\n briers_scores[class_name] = clf_score\n \n fig = go.Figure()\n", "issue": "[BUG] Receiving FutureWarning for each label on Calibration Score check\n**Describe the bug**\r\nReceiving FutureWarning for each label on Calibration Score\r\n\r\n**To Reproduce**\r\nRun a categorical Dataset on Calibration Score check\r\n\r\n**Expected behavior**\r\nNo warnings\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Environment (please complete the following information):**\r\n - OS: mac\r\n - Python Version: 3.8\r\n - Deepchecks Version: 0.2.1\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The calibration score check module.\"\"\"\nfrom sklearn.base import BaseEstimator\nfrom sklearn.calibration import calibration_curve\nfrom sklearn.metrics import brier_score_loss\nimport plotly.graph_objects as go\n\nfrom deepchecks import Dataset, CheckResult, SingleDatasetBaseCheck\nfrom deepchecks.utils.metrics import ModelType, task_type_validation\n\n\n__all__ = ['CalibrationScore']\n\n\nclass CalibrationScore(SingleDatasetBaseCheck):\n \"\"\"Calculate the calibration curve with brier score for each class.\"\"\"\n\n def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n model (BaseEstimator): A scikit-learn-compatible fitted estimator instance\n dataset: a Dataset object\n Returns:\n CheckResult: value is dictionary of class and it's brier score, displays the calibration curve\n graph with each class\n\n Raises:\n DeepchecksValueError: If the object is not a Dataset instance with a label\n \"\"\"\n return self._calibration_score(dataset, model)\n\n def _calibration_score(self, dataset: Dataset, model):\n Dataset.validate_dataset(dataset)\n dataset.validate_label()\n task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])\n\n ds_x = dataset.features_columns\n ds_y = dataset.label_col\n # Expect predict_proba to return in order of the sorted classes.\n y_pred = model.predict_proba(ds_x)\n\n briers_scores = {}\n\n if len(dataset.classes) == 2:\n briers_scores[0] = brier_score_loss(ds_y, y_pred[:, 1], pos_label=dataset.classes[1])\n else:\n for class_index, class_name in enumerate(dataset.classes):\n prob_pos = y_pred[:, class_index]\n clf_score = brier_score_loss(ds_y == class_name, prob_pos, pos_label=class_name)\n briers_scores[class_name] = clf_score\n\n fig = go.Figure()\n\n fig.add_trace(go.Scatter(\n x=[0, 1],\n y=[0, 1],\n line_width=2, line_dash='dash',\n name='Perfectly calibrated',\n ))\n\n if len(dataset.classes) == 2:\n fraction_of_positives, mean_predicted_value = calibration_curve(ds_y, y_pred[:, 1], n_bins=10)\n\n fig.add_trace(go.Scatter(\n x=mean_predicted_value,\n y=fraction_of_positives,\n mode='lines+markers',\n name=f'(brier:{briers_scores[0]:9.4f})',\n ))\n else:\n for class_index, class_name in enumerate(dataset.classes):\n prob_pos = y_pred[:, class_index]\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(ds_y == class_name, prob_pos, n_bins=10)\n\n fig.add_trace(go.Scatter(\n x=mean_predicted_value,\n y=fraction_of_positives,\n mode='lines+markers',\n name=f'{class_name} (brier:{briers_scores[class_name]:9.4f})',\n ))\n\n fig.update_layout(title_text='Calibration plots (reliability curve)',\n width=700, height=500)\n fig.update_yaxes(title='Fraction of positives')\n fig.update_xaxes(title='Mean predicted value')\n\n calibration_text = 'Calibration curves (also known as reliability diagrams) compare how well the ' \\\n 'probabilistic predictions of a binary classifier are calibrated. It plots the true ' \\\n 'frequency of the positive label against its predicted probability, for binned predictions.'\n brier_text = 'The Brier score metric may be used to assess how well a classifier is calibrated. 
For more ' \\\n 'info, please visit https://en.wikipedia.org/wiki/Brier_score'\n return CheckResult(briers_scores, header='Calibration Metric',\n display=[calibration_text, fig, brier_text])\n", "path": "deepchecks/checks/performance/calibration_score.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"The calibration score check module.\"\"\"\nfrom sklearn.base import BaseEstimator\nfrom sklearn.calibration import calibration_curve\nfrom sklearn.metrics import brier_score_loss\nimport plotly.graph_objects as go\n\nfrom deepchecks import Dataset, CheckResult, SingleDatasetBaseCheck\nfrom deepchecks.utils.metrics import ModelType, task_type_validation\n\n__all__ = ['CalibrationScore']\n\n\nclass CalibrationScore(SingleDatasetBaseCheck):\n \"\"\"Calculate the calibration curve with brier score for each class.\"\"\"\n\n def run(self, dataset: Dataset, model: BaseEstimator) -> CheckResult:\n \"\"\"Run check.\n\n Args:\n model (BaseEstimator): A scikit-learn-compatible fitted estimator instance\n dataset: a Dataset object\n Returns:\n CheckResult: value is dictionary of class and it's brier score, displays the calibration curve\n graph with each class\n\n Raises:\n DeepchecksValueError: If the object is not a Dataset instance with a label\n \"\"\"\n return self._calibration_score(dataset, model)\n\n def _calibration_score(self, dataset: Dataset, model):\n Dataset.validate_dataset(dataset)\n dataset.validate_label()\n task_type_validation(model, dataset, [ModelType.MULTICLASS, ModelType.BINARY])\n\n ds_x = dataset.features_columns\n ds_y = dataset.label_col\n # Expect predict_proba to return in order of the sorted classes.\n y_pred = model.predict_proba(ds_x)\n\n briers_scores = {}\n\n if len(dataset.classes) == 2:\n briers_scores[0] = brier_score_loss(ds_y == dataset.classes[1], y_pred[:, 1])\n else:\n for class_index, class_name in enumerate(dataset.classes):\n prob_pos = y_pred[:, class_index]\n clf_score = brier_score_loss(ds_y == class_name, prob_pos)\n briers_scores[class_name] = clf_score\n\n fig = go.Figure()\n\n fig.add_trace(go.Scatter(\n x=[0, 1],\n y=[0, 1],\n line_width=2, line_dash='dash',\n name='Perfectly calibrated',\n ))\n\n if len(dataset.classes) == 2:\n fraction_of_positives, mean_predicted_value = calibration_curve(ds_y, y_pred[:, 1], n_bins=10)\n\n fig.add_trace(go.Scatter(\n x=mean_predicted_value,\n y=fraction_of_positives,\n mode='lines+markers',\n name=f'(brier:{briers_scores[0]:9.4f})',\n ))\n else:\n for class_index, class_name in enumerate(dataset.classes):\n prob_pos = y_pred[:, class_index]\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(ds_y == class_name, prob_pos, n_bins=10)\n\n fig.add_trace(go.Scatter(\n x=mean_predicted_value,\n y=fraction_of_positives,\n mode='lines+markers',\n name=f'{class_name} (brier:{briers_scores[class_name]:9.4f})',\n ))\n\n fig.update_layout(title_text='Calibration plots (reliability curve)',\n width=700, height=500)\n fig.update_yaxes(title='Fraction of positives')\n fig.update_xaxes(title='Mean predicted 
value')\n\n calibration_text = 'Calibration curves (also known as reliability diagrams) compare how well the ' \\\n 'probabilistic predictions of a binary classifier are calibrated. It plots the true ' \\\n 'frequency of the positive label against its predicted probability, for binned predictions.'\n brier_text = 'The Brier score metric may be used to assess how well a classifier is calibrated. For more ' \\\n 'info, please visit https://en.wikipedia.org/wiki/Brier_score'\n return CheckResult(briers_scores, header='Calibration Metric',\n display=[calibration_text, fig, brier_text])\n", "path": "deepchecks/checks/performance/calibration_score.py"}]} | 1,594 | 279 |
gh_patches_debug_5367 | rasdani/github-patches | git_diff | comic__grand-challenge.org-2344 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'NoneType' object has no attribute 'values'
Sentry issue https://sentry.io/organizations/grand-challenge/issues/3127690895/?project=303639&query=is%3Aunresolved
```
slugs = {slug for viewport in mapping.values() for slug in viewport}
```
Added in https://github.com/comic/grand-challenge.org/pull/2322
--- END ISSUE ---
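For illustration, a tiny standalone sketch of the failure and the usual guard; it assumes the cleaned field can come back as `None`, which is what the eventual `or {}` guard in the fix suggests:
```
mapping = None  # what cleaned_data can hold when the JSON field was left empty

# mapping.values()  # AttributeError: 'NoneType' object has no attribute 'values'

mapping = mapping or {}  # normalise None to an empty dict before iterating
slugs = {slug for viewport in mapping.values() for slug in viewport}
print(slugs)  # set()
```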
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/hanging_protocols/forms.py`
Content:
```
1 from django import forms
2
3 from grandchallenge.components.models import ComponentInterface
4 from grandchallenge.core.forms import SaveFormInitMixin
5 from grandchallenge.core.widgets import JSONEditorWidget
6 from grandchallenge.hanging_protocols.models import (
7 HANGING_PROTOCOL_SCHEMA,
8 VIEW_CONTENT_SCHEMA,
9 HangingProtocol,
10 )
11
12
13 class HangingProtocolForm(SaveFormInitMixin, forms.ModelForm):
14 class Meta:
15 model = HangingProtocol
16 fields = ("title", "description", "json")
17 widgets = {"json": JSONEditorWidget(schema=HANGING_PROTOCOL_SCHEMA)}
18 help_texts = {
19 "json": (
20 "To display a single image in full size, define the "
21 "protocol as follows: "
22 '[{"viewport_name": "main", "x": 0,"y": 0,"w": 1,"h": 1,'
23 '"fullsizable": true,"draggable": false,"selectable": true,'
24 '"order": 0}]'
25 )
26 }
27
28
29 class ViewContentMixin:
30 def clean_view_content(self):
31 mapping = self.cleaned_data["view_content"]
32 hanging_protocol = self.cleaned_data["hanging_protocol"]
33 if mapping and not hanging_protocol:
34 self.add_error(
35 error="Please select a hanging protocol before filling this field.",
36 field="view_content",
37 )
38
39 if mapping and hanging_protocol:
40 if set(mapping.keys()) != {
41 x["viewport_name"] for x in hanging_protocol.json
42 }:
43 self.add_error(
44 error=(
45 "Image ports in view_content do not match "
46 "those in the selected hanging protocol."
47 ),
48 field="view_content",
49 )
50
51 slugs = {slug for viewport in mapping.values() for slug in viewport}
52 unknown = []
53 for slug in slugs:
54 if not ComponentInterface.objects.filter(slug=slug).exists():
55 unknown.append(slug)
56 if len(unknown) > 0:
57 self.add_error(
58 error=f"Unkown slugs in view_content: {', '.join(unknown)}",
59 field="view_content",
60 )
61
62 return mapping
63
64 class Meta:
65 widgets = {
66 "view_content": JSONEditorWidget(schema=VIEW_CONTENT_SCHEMA),
67 }
68 help_texts = {
69 "view_content": (
70 "Indicate which Component Interfaces need to be displayed in "
71 'which image port. E.g. {"main": ["interface1"]}. The first '
72 "item in the list of interfaces will be the main image in "
73 "the image port. The first overlay type interface thereafter "
74 "will be rendered as an overlay. For now, any other items "
75 "will be ignored by the viewer."
76 )
77 }
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/hanging_protocols/forms.py b/app/grandchallenge/hanging_protocols/forms.py
--- a/app/grandchallenge/hanging_protocols/forms.py
+++ b/app/grandchallenge/hanging_protocols/forms.py
@@ -28,7 +28,7 @@
class ViewContentMixin:
def clean_view_content(self):
- mapping = self.cleaned_data["view_content"]
+ mapping = self.cleaned_data["view_content"] or {}
hanging_protocol = self.cleaned_data["hanging_protocol"]
if mapping and not hanging_protocol:
self.add_error(
| {"golden_diff": "diff --git a/app/grandchallenge/hanging_protocols/forms.py b/app/grandchallenge/hanging_protocols/forms.py\n--- a/app/grandchallenge/hanging_protocols/forms.py\n+++ b/app/grandchallenge/hanging_protocols/forms.py\n@@ -28,7 +28,7 @@\n \r\n class ViewContentMixin:\r\n def clean_view_content(self):\r\n- mapping = self.cleaned_data[\"view_content\"]\r\n+ mapping = self.cleaned_data[\"view_content\"] or {}\r\n hanging_protocol = self.cleaned_data[\"hanging_protocol\"]\r\n if mapping and not hanging_protocol:\r\n self.add_error(\n", "issue": "'NoneType' object has no attribute 'values'\nSentry issue https://sentry.io/organizations/grand-challenge/issues/3127690895/?project=303639&query=is%3Aunresolved\r\n\r\n```\r\nslugs = {slug for viewport in mapping.values() for slug in viewport}\r\n```\r\n\r\nAdded in https://github.com/comic/grand-challenge.org/pull/2322\n", "before_files": [{"content": "from django import forms\r\n\r\nfrom grandchallenge.components.models import ComponentInterface\r\nfrom grandchallenge.core.forms import SaveFormInitMixin\r\nfrom grandchallenge.core.widgets import JSONEditorWidget\r\nfrom grandchallenge.hanging_protocols.models import (\r\n HANGING_PROTOCOL_SCHEMA,\r\n VIEW_CONTENT_SCHEMA,\r\n HangingProtocol,\r\n)\r\n\r\n\r\nclass HangingProtocolForm(SaveFormInitMixin, forms.ModelForm):\r\n class Meta:\r\n model = HangingProtocol\r\n fields = (\"title\", \"description\", \"json\")\r\n widgets = {\"json\": JSONEditorWidget(schema=HANGING_PROTOCOL_SCHEMA)}\r\n help_texts = {\r\n \"json\": (\r\n \"To display a single image in full size, define the \"\r\n \"protocol as follows: \"\r\n '[{\"viewport_name\": \"main\", \"x\": 0,\"y\": 0,\"w\": 1,\"h\": 1,'\r\n '\"fullsizable\": true,\"draggable\": false,\"selectable\": true,'\r\n '\"order\": 0}]'\r\n )\r\n }\r\n\r\n\r\nclass ViewContentMixin:\r\n def clean_view_content(self):\r\n mapping = self.cleaned_data[\"view_content\"]\r\n hanging_protocol = self.cleaned_data[\"hanging_protocol\"]\r\n if mapping and not hanging_protocol:\r\n self.add_error(\r\n error=\"Please select a hanging protocol before filling this field.\",\r\n field=\"view_content\",\r\n )\r\n\r\n if mapping and hanging_protocol:\r\n if set(mapping.keys()) != {\r\n x[\"viewport_name\"] for x in hanging_protocol.json\r\n }:\r\n self.add_error(\r\n error=(\r\n \"Image ports in view_content do not match \"\r\n \"those in the selected hanging protocol.\"\r\n ),\r\n field=\"view_content\",\r\n )\r\n\r\n slugs = {slug for viewport in mapping.values() for slug in viewport}\r\n unknown = []\r\n for slug in slugs:\r\n if not ComponentInterface.objects.filter(slug=slug).exists():\r\n unknown.append(slug)\r\n if len(unknown) > 0:\r\n self.add_error(\r\n error=f\"Unkown slugs in view_content: {', '.join(unknown)}\",\r\n field=\"view_content\",\r\n )\r\n\r\n return mapping\r\n\r\n class Meta:\r\n widgets = {\r\n \"view_content\": JSONEditorWidget(schema=VIEW_CONTENT_SCHEMA),\r\n }\r\n help_texts = {\r\n \"view_content\": (\r\n \"Indicate which Component Interfaces need to be displayed in \"\r\n 'which image port. E.g. {\"main\": [\"interface1\"]}. The first '\r\n \"item in the list of interfaces will be the main image in \"\r\n \"the image port. The first overlay type interface thereafter \"\r\n \"will be rendered as an overlay. 
For now, any other items \"\r\n \"will be ignored by the viewer.\"\r\n )\r\n }\r\n", "path": "app/grandchallenge/hanging_protocols/forms.py"}], "after_files": [{"content": "from django import forms\r\n\r\nfrom grandchallenge.components.models import ComponentInterface\r\nfrom grandchallenge.core.forms import SaveFormInitMixin\r\nfrom grandchallenge.core.widgets import JSONEditorWidget\r\nfrom grandchallenge.hanging_protocols.models import (\r\n HANGING_PROTOCOL_SCHEMA,\r\n VIEW_CONTENT_SCHEMA,\r\n HangingProtocol,\r\n)\r\n\r\n\r\nclass HangingProtocolForm(SaveFormInitMixin, forms.ModelForm):\r\n class Meta:\r\n model = HangingProtocol\r\n fields = (\"title\", \"description\", \"json\")\r\n widgets = {\"json\": JSONEditorWidget(schema=HANGING_PROTOCOL_SCHEMA)}\r\n help_texts = {\r\n \"json\": (\r\n \"To display a single image in full size, define the \"\r\n \"protocol as follows: \"\r\n '[{\"viewport_name\": \"main\", \"x\": 0,\"y\": 0,\"w\": 1,\"h\": 1,'\r\n '\"fullsizable\": true,\"draggable\": false,\"selectable\": true,'\r\n '\"order\": 0}]'\r\n )\r\n }\r\n\r\n\r\nclass ViewContentMixin:\r\n def clean_view_content(self):\r\n mapping = self.cleaned_data[\"view_content\"] or {}\r\n hanging_protocol = self.cleaned_data[\"hanging_protocol\"]\r\n if mapping and not hanging_protocol:\r\n self.add_error(\r\n error=\"Please select a hanging protocol before filling this field.\",\r\n field=\"view_content\",\r\n )\r\n\r\n if mapping and hanging_protocol:\r\n if set(mapping.keys()) != {\r\n x[\"viewport_name\"] for x in hanging_protocol.json\r\n }:\r\n self.add_error(\r\n error=(\r\n \"Image ports in view_content do not match \"\r\n \"those in the selected hanging protocol.\"\r\n ),\r\n field=\"view_content\",\r\n )\r\n\r\n slugs = {slug for viewport in mapping.values() for slug in viewport}\r\n unknown = []\r\n for slug in slugs:\r\n if not ComponentInterface.objects.filter(slug=slug).exists():\r\n unknown.append(slug)\r\n if len(unknown) > 0:\r\n self.add_error(\r\n error=f\"Unkown slugs in view_content: {', '.join(unknown)}\",\r\n field=\"view_content\",\r\n )\r\n\r\n return mapping\r\n\r\n class Meta:\r\n widgets = {\r\n \"view_content\": JSONEditorWidget(schema=VIEW_CONTENT_SCHEMA),\r\n }\r\n help_texts = {\r\n \"view_content\": (\r\n \"Indicate which Component Interfaces need to be displayed in \"\r\n 'which image port. E.g. {\"main\": [\"interface1\"]}. The first '\r\n \"item in the list of interfaces will be the main image in \"\r\n \"the image port. The first overlay type interface thereafter \"\r\n \"will be rendered as an overlay. For now, any other items \"\r\n \"will be ignored by the viewer.\"\r\n )\r\n }\r\n", "path": "app/grandchallenge/hanging_protocols/forms.py"}]} | 1,080 | 126 |
gh_patches_debug_914 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-204 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature: ignore async function definitions from jones complexity check
Currently we only ignore `ClassDef` and `FunctionDef`: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/complexity/jones.py#L38-L41
What needs to be done:
1. Ignore `AsyncFunctionDef` in the check.
2. We do not currently have a special test case for ignoring nodes; one should be added. We can call it `test_that_some_nodes_are_ignored`. It should cover all three ignored node types: with the lowest complexity threshold there should be no errors: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_complexity/test_jones/test_line_complexity.py
--- END ISSUE ---
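For a rough sense of the requested change, a standalone sketch using only the standard library; the tuple mirrors the `_ignored_nodes` shown below with `ast.AsyncFunctionDef` added, and a plain `isinstance` check stands in for the project's own `is_subtype_of_any` helper:
```
import ast

ignored_nodes = (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)

tree = ast.parse('async def handler():\n    ...')
node = tree.body[0]

# True means the async definition line would be skipped, matching how
# FunctionDef and ClassDef are already treated.
print(isinstance(node, ignored_nodes))
```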
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wemake_python_styleguide/visitors/ast/complexity/jones.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """
4 Jones Complexity to count inline complexity.
5
6 Based on the original `jones-complexity` project:
7 https://github.com/Miserlou/JonesComplexity
8
9 Original project is licensed under MIT.
10 """
11
12 import ast
13 from collections import defaultdict
14 from statistics import median
15 from typing import DefaultDict, List
16
17 from wemake_python_styleguide.logics.nodes import is_subtype_of_any
18 from wemake_python_styleguide.violations.complexity import (
19 JonesScoreViolation,
20 LineComplexityViolation,
21 )
22 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
23
24
25 class JonesComplexityVisitor(BaseNodeVisitor): # TODO: consider `logical_line`
26 """
27 This visitor is used to find complex lines in the code.
28
29 Calculates the number of AST nodes per line of code.
30 Also calculates the median nodes/line score.
31 Then compares these numbers to the given tressholds.
32
33 Some nodes are ignored because there's no sense in analyzing them.
34 Some nodes like type annotations are not affecting line complexity,
35 so we do not count them.
36 """
37
38 _ignored_nodes = (
39 ast.FunctionDef,
40 ast.ClassDef,
41 )
42
43 def __init__(self, *args, **kwargs) -> None:
44 """Initializes line number counter."""
45 super().__init__(*args, **kwargs)
46 self._lines: DefaultDict[int, List[ast.AST]] = defaultdict(list)
47 self._to_ignore: List[ast.AST] = []
48
49 def _post_visit(self) -> None:
50 """
51 Triggers after the whole module was processed.
52
53 Checks each line for its complexity, compares it to the tresshold.
54 We also calculate the final Jones score for the whole module.
55 """
56 for line_nodes in self._lines.values():
57 complexity = len(line_nodes)
58 if complexity > self.options.max_line_complexity:
59 self.add_violation(LineComplexityViolation(
60 line_nodes[0], text=str(complexity),
61 ))
62
63 node_counts = [len(nodes) for nodes in self._lines.values()]
64 total_count = median(node_counts) if node_counts else 0
65 if total_count > self.options.max_jones_score:
66 self.add_violation(JonesScoreViolation())
67
68 def _maybe_ignore_child(self, node: ast.AST) -> bool:
69 if isinstance(node, ast.AnnAssign):
70 self._to_ignore.append(node.annotation)
71
72 return node in self._to_ignore
73
74 def visit(self, node: ast.AST) -> None:
75 """
76 Visits all nodes, sums the number of nodes per line.
77
78 Then calculates the median value of all line results.
79
80 Raises:
81 JonesScoreViolation
82 LineComplexityViolation
83
84 """
85 line_number = getattr(node, 'lineno', None)
86 is_ignored = is_subtype_of_any(node, self._ignored_nodes)
87 if line_number is not None and not is_ignored:
88 if not self._maybe_ignore_child(node):
89 self._lines[line_number].append(node)
90
91 self.generic_visit(node)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wemake_python_styleguide/visitors/ast/complexity/jones.py b/wemake_python_styleguide/visitors/ast/complexity/jones.py
--- a/wemake_python_styleguide/visitors/ast/complexity/jones.py
+++ b/wemake_python_styleguide/visitors/ast/complexity/jones.py
@@ -38,6 +38,7 @@
_ignored_nodes = (
ast.FunctionDef,
ast.ClassDef,
+ ast.AsyncFunctionDef,
)
def __init__(self, *args, **kwargs) -> None:
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/complexity/jones.py b/wemake_python_styleguide/visitors/ast/complexity/jones.py\n--- a/wemake_python_styleguide/visitors/ast/complexity/jones.py\n+++ b/wemake_python_styleguide/visitors/ast/complexity/jones.py\n@@ -38,6 +38,7 @@\n _ignored_nodes = (\n ast.FunctionDef,\n ast.ClassDef,\n+ ast.AsyncFunctionDef,\n )\n \n def __init__(self, *args, **kwargs) -> None:\n", "issue": "Feature: ignore async function definitions from jones complexity check\nCurrently we only ignore `ClassDef` and `FunctionDef`: https://github.com/wemake-services/wemake-python-styleguide/blob/master/wemake_python_styleguide/visitors/ast/complexity/jones.py#L38-L41\r\n\r\nWhat needs to be done:\r\n1. ignore `AsyncFunctionDef` from the check\r\n2. we do not have a special test case for ignoring nodes for now. It should be added. We can call it `test_that_some_nodes_are_ignored`. It should test all three ignored nodes: with the lowest complexity threshold there should be no errors: https://github.com/wemake-services/wemake-python-styleguide/blob/master/tests/test_visitors/test_ast/test_complexity/test_jones/test_line_complexity.py\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nJones Complexity to count inline complexity.\n\nBased on the original `jones-complexity` project:\nhttps://github.com/Miserlou/JonesComplexity\n\nOriginal project is licensed under MIT.\n\"\"\"\n\nimport ast\nfrom collections import defaultdict\nfrom statistics import median\nfrom typing import DefaultDict, List\n\nfrom wemake_python_styleguide.logics.nodes import is_subtype_of_any\nfrom wemake_python_styleguide.violations.complexity import (\n JonesScoreViolation,\n LineComplexityViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass JonesComplexityVisitor(BaseNodeVisitor): # TODO: consider `logical_line`\n \"\"\"\n This visitor is used to find complex lines in the code.\n\n Calculates the number of AST nodes per line of code.\n Also calculates the median nodes/line score.\n Then compares these numbers to the given tressholds.\n\n Some nodes are ignored because there's no sense in analyzing them.\n Some nodes like type annotations are not affecting line complexity,\n so we do not count them.\n \"\"\"\n\n _ignored_nodes = (\n ast.FunctionDef,\n ast.ClassDef,\n )\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initializes line number counter.\"\"\"\n super().__init__(*args, **kwargs)\n self._lines: DefaultDict[int, List[ast.AST]] = defaultdict(list)\n self._to_ignore: List[ast.AST] = []\n\n def _post_visit(self) -> None:\n \"\"\"\n Triggers after the whole module was processed.\n\n Checks each line for its complexity, compares it to the tresshold.\n We also calculate the final Jones score for the whole module.\n \"\"\"\n for line_nodes in self._lines.values():\n complexity = len(line_nodes)\n if complexity > self.options.max_line_complexity:\n self.add_violation(LineComplexityViolation(\n line_nodes[0], text=str(complexity),\n ))\n\n node_counts = [len(nodes) for nodes in self._lines.values()]\n total_count = median(node_counts) if node_counts else 0\n if total_count > self.options.max_jones_score:\n self.add_violation(JonesScoreViolation())\n\n def _maybe_ignore_child(self, node: ast.AST) -> bool:\n if isinstance(node, ast.AnnAssign):\n self._to_ignore.append(node.annotation)\n\n return node in self._to_ignore\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Visits all nodes, sums the number of nodes per 
line.\n\n Then calculates the median value of all line results.\n\n Raises:\n JonesScoreViolation\n LineComplexityViolation\n\n \"\"\"\n line_number = getattr(node, 'lineno', None)\n is_ignored = is_subtype_of_any(node, self._ignored_nodes)\n if line_number is not None and not is_ignored:\n if not self._maybe_ignore_child(node):\n self._lines[line_number].append(node)\n\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/complexity/jones.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nJones Complexity to count inline complexity.\n\nBased on the original `jones-complexity` project:\nhttps://github.com/Miserlou/JonesComplexity\n\nOriginal project is licensed under MIT.\n\"\"\"\n\nimport ast\nfrom collections import defaultdict\nfrom statistics import median\nfrom typing import DefaultDict, List\n\nfrom wemake_python_styleguide.logics.nodes import is_subtype_of_any\nfrom wemake_python_styleguide.violations.complexity import (\n JonesScoreViolation,\n LineComplexityViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\nclass JonesComplexityVisitor(BaseNodeVisitor): # TODO: consider `logical_line`\n \"\"\"\n This visitor is used to find complex lines in the code.\n\n Calculates the number of AST nodes per line of code.\n Also calculates the median nodes/line score.\n Then compares these numbers to the given tressholds.\n\n Some nodes are ignored because there's no sense in analyzing them.\n Some nodes like type annotations are not affecting line complexity,\n so we do not count them.\n \"\"\"\n\n _ignored_nodes = (\n ast.FunctionDef,\n ast.ClassDef,\n ast.AsyncFunctionDef,\n )\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initializes line number counter.\"\"\"\n super().__init__(*args, **kwargs)\n self._lines: DefaultDict[int, List[ast.AST]] = defaultdict(list)\n self._to_ignore: List[ast.AST] = []\n\n def _post_visit(self) -> None:\n \"\"\"\n Triggers after the whole module was processed.\n\n Checks each line for its complexity, compares it to the tresshold.\n We also calculate the final Jones score for the whole module.\n \"\"\"\n for line_nodes in self._lines.values():\n complexity = len(line_nodes)\n if complexity > self.options.max_line_complexity:\n self.add_violation(LineComplexityViolation(\n line_nodes[0], text=str(complexity),\n ))\n\n node_counts = [len(nodes) for nodes in self._lines.values()]\n total_count = median(node_counts) if node_counts else 0\n if total_count > self.options.max_jones_score:\n self.add_violation(JonesScoreViolation())\n\n def _maybe_ignore_child(self, node: ast.AST) -> bool:\n if isinstance(node, ast.AnnAssign):\n self._to_ignore.append(node.annotation)\n\n return node in self._to_ignore\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Visits all nodes, sums the number of nodes per line.\n\n Then calculates the median value of all line results.\n\n Raises:\n JonesScoreViolation\n LineComplexityViolation\n\n \"\"\"\n line_number = getattr(node, 'lineno', None)\n is_ignored = is_subtype_of_any(node, self._ignored_nodes)\n if line_number is not None and not is_ignored:\n if not self._maybe_ignore_child(node):\n self._lines[line_number].append(node)\n\n self.generic_visit(node)\n", "path": "wemake_python_styleguide/visitors/ast/complexity/jones.py"}]} | 1,284 | 134 |
gh_patches_debug_5671 | rasdani/github-patches | git_diff | projectmesa__mesa-539 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
epstein_civil_violence box doesn't fit grid
<img width="431" alt="screen shot 2018-04-01 at 10 05 11 pm" src="https://user-images.githubusercontent.com/166734/38180219-de2decf8-35f8-11e8-8d9b-562d2fb7c58b.png">
^^ Fix the outline grid on this model. The grid should be the same size as the outline.
--- END ISSUE ---
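As a sketch of why the outline and the grid drift apart: a 500-pixel canvas over 40 cells gives 12.5 px per cell, which likely gets rounded when the grid is drawn, so the drawn grid ends up smaller than the canvas outline. Deriving the pixel size from the cell count avoids that. The 12 px cell size below is an assumption chosen to match a 480x480 canvas, not a value stated in the issue, and `citizen_cop_portrayal` refers to the portrayal function defined in the server module shown below:

```python
from mesa.visualization.modules import CanvasGrid

GRID_WIDTH = 40   # cells
GRID_HEIGHT = 40  # cells
CELL_SIZE = 12    # pixels per cell; 40 * 12 = 480

canvas_element = CanvasGrid(
    citizen_cop_portrayal,
    GRID_WIDTH,
    GRID_HEIGHT,
    GRID_WIDTH * CELL_SIZE,   # canvas width in pixels
    GRID_HEIGHT * CELL_SIZE,  # canvas height in pixels
)
```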
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/epstein_civil_violence/civil_violence/server.py`
Content:
```
1 from mesa.visualization.ModularVisualization import ModularServer
2 from mesa.visualization.modules import CanvasGrid
3
4 from .model import CivilViolenceModel
5 from .agent import Citizen, Cop
6
7
8 COP_COLOR = "#000000"
9 AGENT_QUIET_COLOR = "#0066CC"
10 AGENT_REBEL_COLOR = "#CC0000"
11 JAIL_COLOR = "#757575"
12
13
14 def citizen_cop_portrayal(agent):
15 if agent is None:
16 return
17
18 portrayal = {"Shape": "circle",
19 "x": agent.pos[0], "y": agent.pos[1],
20 "Filled": "true"}
21
22 if type(agent) is Citizen:
23 color = AGENT_QUIET_COLOR if agent.condition == "Quiescent" else \
24 AGENT_REBEL_COLOR
25 color = JAIL_COLOR if agent.jail_sentence else color
26 portrayal["Color"] = color
27 portrayal["r"] = 0.8
28 portrayal["Layer"] = 0
29
30 elif type(agent) is Cop:
31 portrayal["Color"] = COP_COLOR
32 portrayal["r"] = 0.5
33 portrayal["Layer"] = 1
34 return portrayal
35
36
37 model_params = dict(height=40,
38 width=40,
39 citizen_density=.7,
40 cop_density=.074,
41 citizen_vision=7,
42 cop_vision=7,
43 legitimacy=.8,
44 max_jail_term=1000)
45
46 canvas_element = CanvasGrid(citizen_cop_portrayal, 40, 40, 500, 500)
47 server = ModularServer(CivilViolenceModel, [canvas_element],
48 "Epstein Civil Violence", model_params)
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/epstein_civil_violence/civil_violence/server.py b/examples/epstein_civil_violence/civil_violence/server.py
--- a/examples/epstein_civil_violence/civil_violence/server.py
+++ b/examples/epstein_civil_violence/civil_violence/server.py
@@ -43,6 +43,6 @@
legitimacy=.8,
max_jail_term=1000)
-canvas_element = CanvasGrid(citizen_cop_portrayal, 40, 40, 500, 500)
+canvas_element = CanvasGrid(citizen_cop_portrayal, 40, 40, 480, 480)
server = ModularServer(CivilViolenceModel, [canvas_element],
"Epstein Civil Violence", model_params)
| {"golden_diff": "diff --git a/examples/epstein_civil_violence/civil_violence/server.py b/examples/epstein_civil_violence/civil_violence/server.py\n--- a/examples/epstein_civil_violence/civil_violence/server.py\n+++ b/examples/epstein_civil_violence/civil_violence/server.py\n@@ -43,6 +43,6 @@\n legitimacy=.8,\n max_jail_term=1000)\n \n-canvas_element = CanvasGrid(citizen_cop_portrayal, 40, 40, 500, 500)\n+canvas_element = CanvasGrid(citizen_cop_portrayal, 40, 40, 480, 480)\n server = ModularServer(CivilViolenceModel, [canvas_element],\n \"Epstein Civil Violence\", model_params)\n", "issue": "epstein_civil_violence box doesn't fit grid\n<img width=\"431\" alt=\"screen shot 2018-04-01 at 10 05 11 pm\" src=\"https://user-images.githubusercontent.com/166734/38180219-de2decf8-35f8-11e8-8d9b-562d2fb7c58b.png\">\r\n\r\n^^ Fix the outline grid on this model. The grid should be the same size as the outline.\n", "before_files": [{"content": "from mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.modules import CanvasGrid\n\nfrom .model import CivilViolenceModel\nfrom .agent import Citizen, Cop\n\n\nCOP_COLOR = \"#000000\"\nAGENT_QUIET_COLOR = \"#0066CC\"\nAGENT_REBEL_COLOR = \"#CC0000\"\nJAIL_COLOR = \"#757575\"\n\n\ndef citizen_cop_portrayal(agent):\n if agent is None:\n return\n\n portrayal = {\"Shape\": \"circle\",\n \"x\": agent.pos[0], \"y\": agent.pos[1],\n \"Filled\": \"true\"}\n\n if type(agent) is Citizen:\n color = AGENT_QUIET_COLOR if agent.condition == \"Quiescent\" else \\\n AGENT_REBEL_COLOR\n color = JAIL_COLOR if agent.jail_sentence else color\n portrayal[\"Color\"] = color\n portrayal[\"r\"] = 0.8\n portrayal[\"Layer\"] = 0\n\n elif type(agent) is Cop:\n portrayal[\"Color\"] = COP_COLOR\n portrayal[\"r\"] = 0.5\n portrayal[\"Layer\"] = 1\n return portrayal\n\n\nmodel_params = dict(height=40,\n width=40,\n citizen_density=.7,\n cop_density=.074,\n citizen_vision=7,\n cop_vision=7,\n legitimacy=.8,\n max_jail_term=1000)\n\ncanvas_element = CanvasGrid(citizen_cop_portrayal, 40, 40, 500, 500)\nserver = ModularServer(CivilViolenceModel, [canvas_element],\n \"Epstein Civil Violence\", model_params)\n", "path": "examples/epstein_civil_violence/civil_violence/server.py"}], "after_files": [{"content": "from mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.modules import CanvasGrid\n\nfrom .model import CivilViolenceModel\nfrom .agent import Citizen, Cop\n\n\nCOP_COLOR = \"#000000\"\nAGENT_QUIET_COLOR = \"#0066CC\"\nAGENT_REBEL_COLOR = \"#CC0000\"\nJAIL_COLOR = \"#757575\"\n\n\ndef citizen_cop_portrayal(agent):\n if agent is None:\n return\n\n portrayal = {\"Shape\": \"circle\",\n \"x\": agent.pos[0], \"y\": agent.pos[1],\n \"Filled\": \"true\"}\n\n if type(agent) is Citizen:\n color = AGENT_QUIET_COLOR if agent.condition == \"Quiescent\" else \\\n AGENT_REBEL_COLOR\n color = JAIL_COLOR if agent.jail_sentence else color\n portrayal[\"Color\"] = color\n portrayal[\"r\"] = 0.8\n portrayal[\"Layer\"] = 0\n\n elif type(agent) is Cop:\n portrayal[\"Color\"] = COP_COLOR\n portrayal[\"r\"] = 0.5\n portrayal[\"Layer\"] = 1\n return portrayal\n\n\nmodel_params = dict(height=40,\n width=40,\n citizen_density=.7,\n cop_density=.074,\n citizen_vision=7,\n cop_vision=7,\n legitimacy=.8,\n max_jail_term=1000)\n\ncanvas_element = CanvasGrid(citizen_cop_portrayal, 40, 40, 480, 480)\nserver = ModularServer(CivilViolenceModel, [canvas_element],\n \"Epstein Civil Violence\", model_params)\n", "path": 
"examples/epstein_civil_violence/civil_violence/server.py"}]} | 863 | 193 |
gh_patches_debug_39517 | rasdani/github-patches | git_diff | strawberry-graphql__strawberry-55 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for interfaces
We should be able to define interfaces with strawberry, something like this:
```python
@strawberry.interface
class Node:
id: strawberry.ID
```
--- END ISSUE ---
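A short usage sketch of what the feature would look like from the caller's side; whether concrete types pick the interface up simply by subclassing it is an assumption about the eventual implementation rather than something stated in the issue:

```python
import strawberry


@strawberry.interface
class Node:
    id: strawberry.ID


@strawberry.type
class Post(Node):  # assumed to inherit the interface (and its `id` field)
    title: str
```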
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `strawberry/type.py`
Content:
```
1 import typing
2 from functools import partial
3
4 from dataclasses import dataclass
5 from graphql import (
6 GraphQLField,
7 GraphQLInputField,
8 GraphQLInputObjectType,
9 GraphQLObjectType,
10 )
11 from graphql.utilities.schema_printer import print_type
12
13 from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT
14 from .type_converter import REGISTRY, get_graphql_type_for_annotation
15 from .utils.str_converters import to_camel_case
16
17
18 def _get_resolver(cls, field_name):
19 def _resolver(obj, info):
20 # TODO: can we make this nicer?
21 # does it work in all the cases?
22
23 field_resolver = getattr(cls(**(obj.__dict__ if obj else {})), field_name)
24
25 if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):
26 return field_resolver(obj, info)
27
28 return field_resolver
29
30 return _resolver
31
32
33 def _convert_annotations_fields(cls, *, is_input=False):
34 FieldClass = GraphQLInputField if is_input else GraphQLField
35 annotations = typing.get_type_hints(cls, None, REGISTRY)
36
37 fields = {}
38
39 for key, annotation in annotations.items():
40 field_name = to_camel_case(key)
41 class_field = getattr(cls, key, None)
42
43 description = getattr(class_field, "description", None)
44
45 fields[field_name] = FieldClass(
46 get_graphql_type_for_annotation(annotation, key),
47 description=description,
48 **({} if is_input else {"resolve": _get_resolver(cls, key)})
49 )
50
51 return fields
52
53
54 def _process_type(cls, *, is_input=False, description=None):
55 name = cls.__name__
56 REGISTRY[name] = cls
57
58 def repr_(self):
59 return print_type(self.field)
60
61 setattr(cls, "__repr__", repr_)
62
63 def _get_fields():
64 fields = _convert_annotations_fields(cls, is_input=is_input)
65
66 fields.update(
67 {
68 to_camel_case(key): value.field
69 for key, value in cls.__dict__.items()
70 if getattr(value, IS_STRAWBERRY_FIELD, False)
71 }
72 )
73
74 return fields
75
76 if is_input:
77 setattr(cls, IS_STRAWBERRY_INPUT, True)
78
79 extra_kwargs = {"description": description or cls.__doc__}
80
81 TypeClass = GraphQLInputObjectType if is_input else GraphQLObjectType
82 cls.field = TypeClass(name, lambda: _get_fields(), **extra_kwargs)
83
84 return dataclass(cls, repr=False)
85
86
87 def type(cls=None, *, is_input=False, description=None):
88 """Annotates a class as a GraphQL type.
89
90 Example usage:
91
92 >>> @strawberry.type:
93 >>> class X:
94 >>> field_abc: str = "ABC"
95 """
96
97 def wrap(cls):
98 return _process_type(cls, is_input=is_input, description=description)
99
100 if cls is None:
101 return wrap
102
103 return wrap(cls)
104
105
106 input = partial(type, is_input=True)
107
```
Path: `strawberry/constants.py`
Content:
```
1 IS_STRAWBERRY_FIELD = "_is_strawberry_field"
2 IS_STRAWBERRY_INPUT = "_is_strawberry_input"
3
```
Path: `strawberry/__init__.py`
Content:
```
1 __version__ = "0.1.0"
2
3
4 from .enum import enum # noqa
5 from .field import field # noqa
6 from .mutation import mutation, subscription # noqa
7 from .scalars import ID # noqa
8 from .schema import Schema # noqa
9 from .type import input, type # noqa
10
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/strawberry/__init__.py b/strawberry/__init__.py
--- a/strawberry/__init__.py
+++ b/strawberry/__init__.py
@@ -6,4 +6,4 @@
from .mutation import mutation, subscription # noqa
from .scalars import ID # noqa
from .schema import Schema # noqa
-from .type import input, type # noqa
+from .type import input, type, interface # noqa
diff --git a/strawberry/constants.py b/strawberry/constants.py
--- a/strawberry/constants.py
+++ b/strawberry/constants.py
@@ -1,2 +1,3 @@
IS_STRAWBERRY_FIELD = "_is_strawberry_field"
IS_STRAWBERRY_INPUT = "_is_strawberry_input"
+IS_STRAWBERRY_INTERFACE = "_is_strawberry_interface"
diff --git a/strawberry/type.py b/strawberry/type.py
--- a/strawberry/type.py
+++ b/strawberry/type.py
@@ -6,11 +6,12 @@
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
+ GraphQLInterfaceType,
GraphQLObjectType,
)
from graphql.utilities.schema_printer import print_type
-from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT
+from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE
from .type_converter import REGISTRY, get_graphql_type_for_annotation
from .utils.str_converters import to_camel_case
@@ -51,7 +52,7 @@
return fields
-def _process_type(cls, *, is_input=False, description=None):
+def _process_type(cls, *, is_input=False, is_interface=False, description=None):
name = cls.__name__
REGISTRY[name] = cls
@@ -75,16 +76,30 @@
if is_input:
setattr(cls, IS_STRAWBERRY_INPUT, True)
+ elif is_interface:
+ setattr(cls, IS_STRAWBERRY_INTERFACE, True)
extra_kwargs = {"description": description or cls.__doc__}
- TypeClass = GraphQLInputObjectType if is_input else GraphQLObjectType
+ if is_input:
+ TypeClass = GraphQLInputObjectType
+ elif is_interface:
+ TypeClass = GraphQLInterfaceType
+ else:
+ TypeClass = GraphQLObjectType
+
+ extra_kwargs["interfaces"] = [
+ klass.field
+ for klass in cls.__bases__
+ if hasattr(klass, IS_STRAWBERRY_INTERFACE)
+ ]
+
cls.field = TypeClass(name, lambda: _get_fields(), **extra_kwargs)
return dataclass(cls, repr=False)
-def type(cls=None, *, is_input=False, description=None):
+def type(cls=None, *, is_input=False, is_interface=False, description=None):
"""Annotates a class as a GraphQL type.
Example usage:
@@ -95,7 +110,9 @@
"""
def wrap(cls):
- return _process_type(cls, is_input=is_input, description=description)
+ return _process_type(
+ cls, is_input=is_input, is_interface=is_interface, description=description
+ )
if cls is None:
return wrap
@@ -104,3 +121,4 @@
input = partial(type, is_input=True)
+interface = partial(type, is_interface=True)
| {"golden_diff": "diff --git a/strawberry/__init__.py b/strawberry/__init__.py\n--- a/strawberry/__init__.py\n+++ b/strawberry/__init__.py\n@@ -6,4 +6,4 @@\n from .mutation import mutation, subscription # noqa\n from .scalars import ID # noqa\n from .schema import Schema # noqa\n-from .type import input, type # noqa\n+from .type import input, type, interface # noqa\ndiff --git a/strawberry/constants.py b/strawberry/constants.py\n--- a/strawberry/constants.py\n+++ b/strawberry/constants.py\n@@ -1,2 +1,3 @@\n IS_STRAWBERRY_FIELD = \"_is_strawberry_field\"\n IS_STRAWBERRY_INPUT = \"_is_strawberry_input\"\n+IS_STRAWBERRY_INTERFACE = \"_is_strawberry_interface\"\ndiff --git a/strawberry/type.py b/strawberry/type.py\n--- a/strawberry/type.py\n+++ b/strawberry/type.py\n@@ -6,11 +6,12 @@\n GraphQLField,\n GraphQLInputField,\n GraphQLInputObjectType,\n+ GraphQLInterfaceType,\n GraphQLObjectType,\n )\n from graphql.utilities.schema_printer import print_type\n \n-from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT\n+from .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE\n from .type_converter import REGISTRY, get_graphql_type_for_annotation\n from .utils.str_converters import to_camel_case\n \n@@ -51,7 +52,7 @@\n return fields\n \n \n-def _process_type(cls, *, is_input=False, description=None):\n+def _process_type(cls, *, is_input=False, is_interface=False, description=None):\n name = cls.__name__\n REGISTRY[name] = cls\n \n@@ -75,16 +76,30 @@\n \n if is_input:\n setattr(cls, IS_STRAWBERRY_INPUT, True)\n+ elif is_interface:\n+ setattr(cls, IS_STRAWBERRY_INTERFACE, True)\n \n extra_kwargs = {\"description\": description or cls.__doc__}\n \n- TypeClass = GraphQLInputObjectType if is_input else GraphQLObjectType\n+ if is_input:\n+ TypeClass = GraphQLInputObjectType\n+ elif is_interface:\n+ TypeClass = GraphQLInterfaceType\n+ else:\n+ TypeClass = GraphQLObjectType\n+\n+ extra_kwargs[\"interfaces\"] = [\n+ klass.field\n+ for klass in cls.__bases__\n+ if hasattr(klass, IS_STRAWBERRY_INTERFACE)\n+ ]\n+\n cls.field = TypeClass(name, lambda: _get_fields(), **extra_kwargs)\n \n return dataclass(cls, repr=False)\n \n \n-def type(cls=None, *, is_input=False, description=None):\n+def type(cls=None, *, is_input=False, is_interface=False, description=None):\n \"\"\"Annotates a class as a GraphQL type.\n \n Example usage:\n@@ -95,7 +110,9 @@\n \"\"\"\n \n def wrap(cls):\n- return _process_type(cls, is_input=is_input, description=description)\n+ return _process_type(\n+ cls, is_input=is_input, is_interface=is_interface, description=description\n+ )\n \n if cls is None:\n return wrap\n@@ -104,3 +121,4 @@\n \n \n input = partial(type, is_input=True)\n+interface = partial(type, is_interface=True)\n", "issue": "Add support for interfaces\nWe should be able to define interfaces with strawberry, something like this:\r\n\r\n```python\r\n\r\[email protected]\r\nclass Node:\r\n id: strawberry.ID\r\n```\n", "before_files": [{"content": "import typing\nfrom functools import partial\n\nfrom dataclasses import dataclass\nfrom graphql import (\n GraphQLField,\n GraphQLInputField,\n GraphQLInputObjectType,\n GraphQLObjectType,\n)\nfrom graphql.utilities.schema_printer import print_type\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT\nfrom .type_converter import REGISTRY, get_graphql_type_for_annotation\nfrom .utils.str_converters import to_camel_case\n\n\ndef _get_resolver(cls, field_name):\n def _resolver(obj, info):\n # TODO: can we make this nicer?\n # does it 
work in all the cases?\n\n field_resolver = getattr(cls(**(obj.__dict__ if obj else {})), field_name)\n\n if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):\n return field_resolver(obj, info)\n\n return field_resolver\n\n return _resolver\n\n\ndef _convert_annotations_fields(cls, *, is_input=False):\n FieldClass = GraphQLInputField if is_input else GraphQLField\n annotations = typing.get_type_hints(cls, None, REGISTRY)\n\n fields = {}\n\n for key, annotation in annotations.items():\n field_name = to_camel_case(key)\n class_field = getattr(cls, key, None)\n\n description = getattr(class_field, \"description\", None)\n\n fields[field_name] = FieldClass(\n get_graphql_type_for_annotation(annotation, key),\n description=description,\n **({} if is_input else {\"resolve\": _get_resolver(cls, key)})\n )\n\n return fields\n\n\ndef _process_type(cls, *, is_input=False, description=None):\n name = cls.__name__\n REGISTRY[name] = cls\n\n def repr_(self):\n return print_type(self.field)\n\n setattr(cls, \"__repr__\", repr_)\n\n def _get_fields():\n fields = _convert_annotations_fields(cls, is_input=is_input)\n\n fields.update(\n {\n to_camel_case(key): value.field\n for key, value in cls.__dict__.items()\n if getattr(value, IS_STRAWBERRY_FIELD, False)\n }\n )\n\n return fields\n\n if is_input:\n setattr(cls, IS_STRAWBERRY_INPUT, True)\n\n extra_kwargs = {\"description\": description or cls.__doc__}\n\n TypeClass = GraphQLInputObjectType if is_input else GraphQLObjectType\n cls.field = TypeClass(name, lambda: _get_fields(), **extra_kwargs)\n\n return dataclass(cls, repr=False)\n\n\ndef type(cls=None, *, is_input=False, description=None):\n \"\"\"Annotates a class as a GraphQL type.\n\n Example usage:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = \"ABC\"\n \"\"\"\n\n def wrap(cls):\n return _process_type(cls, is_input=is_input, description=description)\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n\n\ninput = partial(type, is_input=True)\n", "path": "strawberry/type.py"}, {"content": "IS_STRAWBERRY_FIELD = \"_is_strawberry_field\"\nIS_STRAWBERRY_INPUT = \"_is_strawberry_input\"\n", "path": "strawberry/constants.py"}, {"content": "__version__ = \"0.1.0\"\n\n\nfrom .enum import enum # noqa\nfrom .field import field # noqa\nfrom .mutation import mutation, subscription # noqa\nfrom .scalars import ID # noqa\nfrom .schema import Schema # noqa\nfrom .type import input, type # noqa\n", "path": "strawberry/__init__.py"}], "after_files": [{"content": "import typing\nfrom functools import partial\n\nfrom dataclasses import dataclass\nfrom graphql import (\n GraphQLField,\n GraphQLInputField,\n GraphQLInputObjectType,\n GraphQLInterfaceType,\n GraphQLObjectType,\n)\nfrom graphql.utilities.schema_printer import print_type\n\nfrom .constants import IS_STRAWBERRY_FIELD, IS_STRAWBERRY_INPUT, IS_STRAWBERRY_INTERFACE\nfrom .type_converter import REGISTRY, get_graphql_type_for_annotation\nfrom .utils.str_converters import to_camel_case\n\n\ndef _get_resolver(cls, field_name):\n def _resolver(obj, info):\n # TODO: can we make this nicer?\n # does it work in all the cases?\n\n field_resolver = getattr(cls(**(obj.__dict__ if obj else {})), field_name)\n\n if getattr(field_resolver, IS_STRAWBERRY_FIELD, False):\n return field_resolver(obj, info)\n\n return field_resolver\n\n return _resolver\n\n\ndef _convert_annotations_fields(cls, *, is_input=False):\n FieldClass = GraphQLInputField if is_input else GraphQLField\n annotations = typing.get_type_hints(cls, None, REGISTRY)\n\n fields = {}\n\n 
for key, annotation in annotations.items():\n field_name = to_camel_case(key)\n class_field = getattr(cls, key, None)\n\n description = getattr(class_field, \"description\", None)\n\n fields[field_name] = FieldClass(\n get_graphql_type_for_annotation(annotation, key),\n description=description,\n **({} if is_input else {\"resolve\": _get_resolver(cls, key)})\n )\n\n return fields\n\n\ndef _process_type(cls, *, is_input=False, is_interface=False, description=None):\n name = cls.__name__\n REGISTRY[name] = cls\n\n def repr_(self):\n return print_type(self.field)\n\n setattr(cls, \"__repr__\", repr_)\n\n def _get_fields():\n fields = _convert_annotations_fields(cls, is_input=is_input)\n\n fields.update(\n {\n to_camel_case(key): value.field\n for key, value in cls.__dict__.items()\n if getattr(value, IS_STRAWBERRY_FIELD, False)\n }\n )\n\n return fields\n\n if is_input:\n setattr(cls, IS_STRAWBERRY_INPUT, True)\n elif is_interface:\n setattr(cls, IS_STRAWBERRY_INTERFACE, True)\n\n extra_kwargs = {\"description\": description or cls.__doc__}\n\n if is_input:\n TypeClass = GraphQLInputObjectType\n elif is_interface:\n TypeClass = GraphQLInterfaceType\n else:\n TypeClass = GraphQLObjectType\n\n extra_kwargs[\"interfaces\"] = [\n klass.field\n for klass in cls.__bases__\n if hasattr(klass, IS_STRAWBERRY_INTERFACE)\n ]\n\n cls.field = TypeClass(name, lambda: _get_fields(), **extra_kwargs)\n\n return dataclass(cls, repr=False)\n\n\ndef type(cls=None, *, is_input=False, is_interface=False, description=None):\n \"\"\"Annotates a class as a GraphQL type.\n\n Example usage:\n\n >>> @strawberry.type:\n >>> class X:\n >>> field_abc: str = \"ABC\"\n \"\"\"\n\n def wrap(cls):\n return _process_type(\n cls, is_input=is_input, is_interface=is_interface, description=description\n )\n\n if cls is None:\n return wrap\n\n return wrap(cls)\n\n\ninput = partial(type, is_input=True)\ninterface = partial(type, is_interface=True)\n", "path": "strawberry/type.py"}, {"content": "IS_STRAWBERRY_FIELD = \"_is_strawberry_field\"\nIS_STRAWBERRY_INPUT = \"_is_strawberry_input\"\nIS_STRAWBERRY_INTERFACE = \"_is_strawberry_interface\"\n", "path": "strawberry/constants.py"}, {"content": "__version__ = \"0.1.0\"\n\n\nfrom .enum import enum # noqa\nfrom .field import field # noqa\nfrom .mutation import mutation, subscription # noqa\nfrom .scalars import ID # noqa\nfrom .schema import Schema # noqa\nfrom .type import input, type, interface # noqa\n", "path": "strawberry/__init__.py"}]} | 1,308 | 771 |
gh_patches_debug_16 | rasdani/github-patches | git_diff | OCHA-DAP__hdx-ckan-1401 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The MailChimp subscribe field could use a little bit more padding-left
Right now the input text is too close to the left border. It would be nice to add some padding there.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckanext-hdx_theme/ckanext/hdx_theme/version.py`
Content:
```
1 hdx_version = 'v0.3.9'
2
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py
@@ -1 +1 @@
-hdx_version = 'v0.3.9'
+hdx_version = 'v0.3.10'
| {"golden_diff": "diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/version.py b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n--- a/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n+++ b/ckanext-hdx_theme/ckanext/hdx_theme/version.py\n@@ -1 +1 @@\n-hdx_version = 'v0.3.9'\n+hdx_version = 'v0.3.10'\n", "issue": "The MailChimp subscribe field could use a little bit more padding-left\nRight now the input text is too close to the left border. It would be nice to add some padding there. \n\n\n\n", "before_files": [{"content": "hdx_version = 'v0.3.9'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}], "after_files": [{"content": "hdx_version = 'v0.3.10'\n", "path": "ckanext-hdx_theme/ckanext/hdx_theme/version.py"}]} | 405 | 107 |
gh_patches_debug_24750 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-7636 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CT-2479] replace all instances of set-output and node16
Details in https://github.com/dbt-labs/actions/issues/39.
### Acceptance Criteria
- [ ] Verified there are no workflows to update
_or_
- [ ] removed all uses of `set-output` - either directly or by updating any marketplace actions we reference
- [ ] removed all references to node16 - either directly or by updating any marketplace actions we reference
- [ ] backport changes
--- END ISSUE ---
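For reference, the shape of the migration GitHub describes for the `set-output` deprecation, sketched in Python since the affected script is Python; the `latest` variable is a placeholder for a value computed earlier in the script:

```python
import os

latest = True  # placeholder for a value computed earlier in the script

# Deprecated: emits a ::set-output workflow command on stdout.
print(f"::set-output name=latest::{latest}")

# Replacement: append newline-terminated "name=value" pairs to the file
# that the GITHUB_OUTPUT environment variable points at.
with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as gh_output:
    gh_output.write(f"latest={latest}\n")
```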
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `.github/actions/latest-wrangler/main.py`
Content:
```
1 import os
2 import sys
3 import requests
4 from distutils.util import strtobool
5 from typing import Union
6 from packaging.version import parse, Version
7
8 if __name__ == "__main__":
9
10 # get inputs
11 package = os.environ["INPUT_PACKAGE"]
12 new_version = parse(os.environ["INPUT_NEW_VERSION"])
13 gh_token = os.environ["INPUT_GH_TOKEN"]
14 halt_on_missing = strtobool(os.environ.get("INPUT_HALT_ON_MISSING", "False"))
15
16 # get package metadata from github
17 package_request = requests.get(
18 f"https://api.github.com/orgs/dbt-labs/packages/container/{package}/versions",
19 auth=("", gh_token),
20 )
21 package_meta = package_request.json()
22
23 # Log info if we don't get a 200
24 if package_request.status_code != 200:
25 print(f"Call to GH API failed: {package_request.status_code} {package_meta['message']}")
26
27 # Make an early exit if there is no matching package in github
28 if package_request.status_code == 404:
29 if halt_on_missing:
30 sys.exit(1)
31 else:
32 # everything is the latest if the package doesn't exist
33 print(f"::set-output name=latest::{True}")
34 print(f"::set-output name=minor_latest::{True}")
35 sys.exit(0)
36
37 # TODO: verify package meta is "correct"
38 # https://github.com/dbt-labs/dbt-core/issues/4640
39
40 # map versions and tags
41 version_tag_map = {
42 version["id"]: version["metadata"]["container"]["tags"] for version in package_meta
43 }
44
45 # is pre-release
46 pre_rel = True if any(x in str(new_version) for x in ["a", "b", "rc"]) else False
47
48 # semver of current latest
49 for version, tags in version_tag_map.items():
50 if "latest" in tags:
51 # N.B. This seems counterintuitive, but we expect any version tagged
52 # 'latest' to have exactly three associated tags:
53 # latest, major.minor.latest, and major.minor.patch.
54 # Subtracting everything that contains the string 'latest' gets us
55 # the major.minor.patch which is what's needed for comparison.
56 current_latest = parse([tag for tag in tags if "latest" not in tag][0])
57 else:
58 current_latest = False
59
60 # semver of current_minor_latest
61 for version, tags in version_tag_map.items():
62 if f"{new_version.major}.{new_version.minor}.latest" in tags:
63 # Similar to above, only now we expect exactly two tags:
64 # major.minor.patch and major.minor.latest
65 current_minor_latest = parse([tag for tag in tags if "latest" not in tag][0])
66 else:
67 current_minor_latest = False
68
69 def is_latest(
70 pre_rel: bool, new_version: Version, remote_latest: Union[bool, Version]
71 ) -> bool:
72 """Determine if a given contaier should be tagged 'latest' based on:
73 - it's pre-release status
74 - it's version
75 - the version of a previously identified container tagged 'latest'
76
77 :param pre_rel: Wether or not the version of the new container is a pre-release
78 :param new_version: The version of the new container
79 :param remote_latest: The version of the previously identified container that's
80 already tagged latest or False
81 """
82 # is a pre-release = not latest
83 if pre_rel:
84 return False
85 # + no latest tag found = is latest
86 if not remote_latest:
87 return True
88 # + if remote version is lower than current = is latest, else not latest
89 return True if remote_latest <= new_version else False
90
91 latest = is_latest(pre_rel, new_version, current_latest)
92 minor_latest = is_latest(pre_rel, new_version, current_minor_latest)
93
94 print(f"::set-output name=latest::{latest}")
95 print(f"::set-output name=minor_latest::{minor_latest}")
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/.github/actions/latest-wrangler/main.py b/.github/actions/latest-wrangler/main.py
--- a/.github/actions/latest-wrangler/main.py
+++ b/.github/actions/latest-wrangler/main.py
@@ -28,11 +28,12 @@
if package_request.status_code == 404:
if halt_on_missing:
sys.exit(1)
- else:
- # everything is the latest if the package doesn't exist
- print(f"::set-output name=latest::{True}")
- print(f"::set-output name=minor_latest::{True}")
- sys.exit(0)
+ # everything is the latest if the package doesn't exist
+ github_output = os.environ.get("GITHUB_OUTPUT")
+ with open(github_output, "at", encoding="utf-8") as gh_output:
+ gh_output.write("latest=True")
+ gh_output.write("minor_latest=True")
+ sys.exit(0)
# TODO: verify package meta is "correct"
# https://github.com/dbt-labs/dbt-core/issues/4640
@@ -91,5 +92,7 @@
latest = is_latest(pre_rel, new_version, current_latest)
minor_latest = is_latest(pre_rel, new_version, current_minor_latest)
- print(f"::set-output name=latest::{latest}")
- print(f"::set-output name=minor_latest::{minor_latest}")
+ github_output = os.environ.get("GITHUB_OUTPUT")
+ with open(github_output, "at", encoding="utf-8") as gh_output:
+ gh_output.write(f"latest={latest}")
+ gh_output.write(f"minor_latest={minor_latest}")
| {"golden_diff": "diff --git a/.github/actions/latest-wrangler/main.py b/.github/actions/latest-wrangler/main.py\n--- a/.github/actions/latest-wrangler/main.py\n+++ b/.github/actions/latest-wrangler/main.py\n@@ -28,11 +28,12 @@\n if package_request.status_code == 404:\n if halt_on_missing:\n sys.exit(1)\n- else:\n- # everything is the latest if the package doesn't exist\n- print(f\"::set-output name=latest::{True}\")\n- print(f\"::set-output name=minor_latest::{True}\")\n- sys.exit(0)\n+ # everything is the latest if the package doesn't exist\n+ github_output = os.environ.get(\"GITHUB_OUTPUT\")\n+ with open(github_output, \"at\", encoding=\"utf-8\") as gh_output:\n+ gh_output.write(\"latest=True\")\n+ gh_output.write(\"minor_latest=True\")\n+ sys.exit(0)\n \n # TODO: verify package meta is \"correct\"\n # https://github.com/dbt-labs/dbt-core/issues/4640\n@@ -91,5 +92,7 @@\n latest = is_latest(pre_rel, new_version, current_latest)\n minor_latest = is_latest(pre_rel, new_version, current_minor_latest)\n \n- print(f\"::set-output name=latest::{latest}\")\n- print(f\"::set-output name=minor_latest::{minor_latest}\")\n+ github_output = os.environ.get(\"GITHUB_OUTPUT\")\n+ with open(github_output, \"at\", encoding=\"utf-8\") as gh_output:\n+ gh_output.write(f\"latest={latest}\")\n+ gh_output.write(f\"minor_latest={minor_latest}\")\n", "issue": "[CT-2479] replace all instances of set-output and node16\nDetails in https://github.com/dbt-labs/actions/issues/39.\r\n\r\n### Acceptance Criteria\r\n- [ ] Verified there are no workflows to update\r\n_or_\r\n- [ ] removed all uses of `set-output` - either directly or up updating any marketplace actions we reference\r\n- [ ] removed all references to node16 - either directly or up updating any marketplace actions we reference\r\n- [ ] backport changes\n", "before_files": [{"content": "import os\nimport sys\nimport requests\nfrom distutils.util import strtobool\nfrom typing import Union\nfrom packaging.version import parse, Version\n\nif __name__ == \"__main__\":\n\n # get inputs\n package = os.environ[\"INPUT_PACKAGE\"]\n new_version = parse(os.environ[\"INPUT_NEW_VERSION\"])\n gh_token = os.environ[\"INPUT_GH_TOKEN\"]\n halt_on_missing = strtobool(os.environ.get(\"INPUT_HALT_ON_MISSING\", \"False\"))\n\n # get package metadata from github\n package_request = requests.get(\n f\"https://api.github.com/orgs/dbt-labs/packages/container/{package}/versions\",\n auth=(\"\", gh_token),\n )\n package_meta = package_request.json()\n\n # Log info if we don't get a 200\n if package_request.status_code != 200:\n print(f\"Call to GH API failed: {package_request.status_code} {package_meta['message']}\")\n\n # Make an early exit if there is no matching package in github\n if package_request.status_code == 404:\n if halt_on_missing:\n sys.exit(1)\n else:\n # everything is the latest if the package doesn't exist\n print(f\"::set-output name=latest::{True}\")\n print(f\"::set-output name=minor_latest::{True}\")\n sys.exit(0)\n\n # TODO: verify package meta is \"correct\"\n # https://github.com/dbt-labs/dbt-core/issues/4640\n\n # map versions and tags\n version_tag_map = {\n version[\"id\"]: version[\"metadata\"][\"container\"][\"tags\"] for version in package_meta\n }\n\n # is pre-release\n pre_rel = True if any(x in str(new_version) for x in [\"a\", \"b\", \"rc\"]) else False\n\n # semver of current latest\n for version, tags in version_tag_map.items():\n if \"latest\" in tags:\n # N.B. 
This seems counterintuitive, but we expect any version tagged\n # 'latest' to have exactly three associated tags:\n # latest, major.minor.latest, and major.minor.patch.\n # Subtracting everything that contains the string 'latest' gets us\n # the major.minor.patch which is what's needed for comparison.\n current_latest = parse([tag for tag in tags if \"latest\" not in tag][0])\n else:\n current_latest = False\n\n # semver of current_minor_latest\n for version, tags in version_tag_map.items():\n if f\"{new_version.major}.{new_version.minor}.latest\" in tags:\n # Similar to above, only now we expect exactly two tags:\n # major.minor.patch and major.minor.latest\n current_minor_latest = parse([tag for tag in tags if \"latest\" not in tag][0])\n else:\n current_minor_latest = False\n\n def is_latest(\n pre_rel: bool, new_version: Version, remote_latest: Union[bool, Version]\n ) -> bool:\n \"\"\"Determine if a given contaier should be tagged 'latest' based on:\n - it's pre-release status\n - it's version\n - the version of a previously identified container tagged 'latest'\n\n :param pre_rel: Wether or not the version of the new container is a pre-release\n :param new_version: The version of the new container\n :param remote_latest: The version of the previously identified container that's\n already tagged latest or False\n \"\"\"\n # is a pre-release = not latest\n if pre_rel:\n return False\n # + no latest tag found = is latest\n if not remote_latest:\n return True\n # + if remote version is lower than current = is latest, else not latest\n return True if remote_latest <= new_version else False\n\n latest = is_latest(pre_rel, new_version, current_latest)\n minor_latest = is_latest(pre_rel, new_version, current_minor_latest)\n\n print(f\"::set-output name=latest::{latest}\")\n print(f\"::set-output name=minor_latest::{minor_latest}\")\n", "path": ".github/actions/latest-wrangler/main.py"}], "after_files": [{"content": "import os\nimport sys\nimport requests\nfrom distutils.util import strtobool\nfrom typing import Union\nfrom packaging.version import parse, Version\n\nif __name__ == \"__main__\":\n\n # get inputs\n package = os.environ[\"INPUT_PACKAGE\"]\n new_version = parse(os.environ[\"INPUT_NEW_VERSION\"])\n gh_token = os.environ[\"INPUT_GH_TOKEN\"]\n halt_on_missing = strtobool(os.environ.get(\"INPUT_HALT_ON_MISSING\", \"False\"))\n\n # get package metadata from github\n package_request = requests.get(\n f\"https://api.github.com/orgs/dbt-labs/packages/container/{package}/versions\",\n auth=(\"\", gh_token),\n )\n package_meta = package_request.json()\n\n # Log info if we don't get a 200\n if package_request.status_code != 200:\n print(f\"Call to GH API failed: {package_request.status_code} {package_meta['message']}\")\n\n # Make an early exit if there is no matching package in github\n if package_request.status_code == 404:\n if halt_on_missing:\n sys.exit(1)\n # everything is the latest if the package doesn't exist\n github_output = os.environ.get(\"GITHUB_OUTPUT\")\n with open(github_output, \"at\", encoding=\"utf-8\") as gh_output:\n gh_output.write(\"latest=True\")\n gh_output.write(\"minor_latest=True\")\n sys.exit(0)\n\n # TODO: verify package meta is \"correct\"\n # https://github.com/dbt-labs/dbt-core/issues/4640\n\n # map versions and tags\n version_tag_map = {\n version[\"id\"]: version[\"metadata\"][\"container\"][\"tags\"] for version in package_meta\n }\n\n # is pre-release\n pre_rel = True if any(x in str(new_version) for x in [\"a\", \"b\", \"rc\"]) else False\n\n # semver 
of current latest\n for version, tags in version_tag_map.items():\n if \"latest\" in tags:\n # N.B. This seems counterintuitive, but we expect any version tagged\n # 'latest' to have exactly three associated tags:\n # latest, major.minor.latest, and major.minor.patch.\n # Subtracting everything that contains the string 'latest' gets us\n # the major.minor.patch which is what's needed for comparison.\n current_latest = parse([tag for tag in tags if \"latest\" not in tag][0])\n else:\n current_latest = False\n\n # semver of current_minor_latest\n for version, tags in version_tag_map.items():\n if f\"{new_version.major}.{new_version.minor}.latest\" in tags:\n # Similar to above, only now we expect exactly two tags:\n # major.minor.patch and major.minor.latest\n current_minor_latest = parse([tag for tag in tags if \"latest\" not in tag][0])\n else:\n current_minor_latest = False\n\n def is_latest(\n pre_rel: bool, new_version: Version, remote_latest: Union[bool, Version]\n ) -> bool:\n \"\"\"Determine if a given contaier should be tagged 'latest' based on:\n - it's pre-release status\n - it's version\n - the version of a previously identified container tagged 'latest'\n\n :param pre_rel: Wether or not the version of the new container is a pre-release\n :param new_version: The version of the new container\n :param remote_latest: The version of the previously identified container that's\n already tagged latest or False\n \"\"\"\n # is a pre-release = not latest\n if pre_rel:\n return False\n # + no latest tag found = is latest\n if not remote_latest:\n return True\n # + if remote version is lower than current = is latest, else not latest\n return True if remote_latest <= new_version else False\n\n latest = is_latest(pre_rel, new_version, current_latest)\n minor_latest = is_latest(pre_rel, new_version, current_minor_latest)\n\n github_output = os.environ.get(\"GITHUB_OUTPUT\")\n with open(github_output, \"at\", encoding=\"utf-8\") as gh_output:\n gh_output.write(f\"latest={latest}\")\n gh_output.write(f\"minor_latest={minor_latest}\")\n", "path": ".github/actions/latest-wrangler/main.py"}]} | 1,441 | 377 |
gh_patches_debug_37590 | rasdani/github-patches | git_diff | urllib3__urllib3-840 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
urllib3 attempts to use IPv6 even when IPv6 is disabled
This is an issue when running on a server without IPv6 (must be disabled because the network does not support it). Example when connecting to https://graph.facebook.com using requests and IPv4 happens to fail:
```
HTTPSConnectionPool(host='graph.facebook.com', port=443): Max retries exceeded with url: /v2.5/me/feed (Caused by NewConnectionError('<requests.packages.urllib3.connection.VerifiedHTTPSConnection object at 0x7f4dbd158518>: Failed to establish a new connection: [Errno 97] Address family not supported by protocol',))
Traceback (most recent call last):
File "/home/lib/python3.4/site-packages/requests/packages/urllib3/connection.py", line 137, in _new_conn
(self.host, self.port), self.timeout, **extra_kw)
File "/home/lib/python3.4/site-packages/requests/packages/urllib3/util/connection.py", line 91, in create_connection
raise err
File "/home/lib/python3.4/site-packages/requests/packages/urllib3/util/connection.py", line 71, in create_connection
sock = socket.socket(af, socktype, proto)
File "/usr/lib/python3.4/socket.py", line 126, in __init__
_socket.socket.__init__(self, family, type, proto, fileno)
OSError: [Errno 97] Address family not supported by protocol
```
urllib3 should throw an exception after exhausting all IPv4 options instead of trying (and invariably failing) IPv6.
See closed issue https://github.com/kennethreitz/requests/issues/3084.
--- END ISSUE ---
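A sketch of the kind of runtime probe this calls for: checking whether the host can actually bind an IPv6 socket before asking `getaddrinfo` for IPv6 records. It illustrates the idea rather than reproducing urllib3's code:

```python
import socket


def system_has_ipv6() -> bool:
    """Best-effort check: can this host bind an IPv6 socket at all?"""
    if not socket.has_ipv6:  # interpreter built without IPv6 support
        return False
    sock = None
    try:
        sock = socket.socket(socket.AF_INET6)
        sock.bind(("::1", 0))
        return True
    except OSError:
        return False
    finally:
        if sock is not None:
            sock.close()


# Restrict getaddrinfo to IPv4 when IPv6 is unusable, so connection attempts
# never fail with "Address family not supported by protocol".
family = socket.AF_UNSPEC if system_has_ipv6() else socket.AF_INET
```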
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `urllib3/util/connection.py`
Content:
```
1 from __future__ import absolute_import
2 import socket
3 try:
4 from select import poll, POLLIN
5 except ImportError: # `poll` doesn't exist on OSX and other platforms
6 poll = False
7 try:
8 from select import select
9 except ImportError: # `select` doesn't exist on AppEngine.
10 select = False
11
12
13 def is_connection_dropped(conn): # Platform-specific
14 """
15 Returns True if the connection is dropped and should be closed.
16
17 :param conn:
18 :class:`httplib.HTTPConnection` object.
19
20 Note: For platforms like AppEngine, this will always return ``False`` to
21 let the platform handle connection recycling transparently for us.
22 """
23 sock = getattr(conn, 'sock', False)
24 if sock is False: # Platform-specific: AppEngine
25 return False
26 if sock is None: # Connection already closed (such as by httplib).
27 return True
28
29 if not poll:
30 if not select: # Platform-specific: AppEngine
31 return False
32
33 try:
34 return select([sock], [], [], 0.0)[0]
35 except socket.error:
36 return True
37
38 # This version is better on platforms that support it.
39 p = poll()
40 p.register(sock, POLLIN)
41 for (fno, ev) in p.poll(0.0):
42 if fno == sock.fileno():
43 # Either data is buffered (bad), or the connection is dropped.
44 return True
45
46
47 # This function is copied from socket.py in the Python 2.7 standard
48 # library test suite. Added to its signature is only `socket_options`.
49 def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
50 source_address=None, socket_options=None):
51 """Connect to *address* and return the socket object.
52
53 Convenience function. Connect to *address* (a 2-tuple ``(host,
54 port)``) and return the socket object. Passing the optional
55 *timeout* parameter will set the timeout on the socket instance
56 before attempting to connect. If no *timeout* is supplied, the
57 global default timeout setting returned by :func:`getdefaulttimeout`
58 is used. If *source_address* is set it must be a tuple of (host, port)
59 for the socket to bind as a source address before making the connection.
60 An host of '' or port 0 tells the OS to use the default.
61 """
62
63 host, port = address
64 if host.startswith('['):
65 host = host.strip('[]')
66 err = None
67 for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
68 af, socktype, proto, canonname, sa = res
69 sock = None
70 try:
71 sock = socket.socket(af, socktype, proto)
72
73 # If provided, set socket level options before connecting.
74 # This is the only addition urllib3 makes to this function.
75 _set_socket_options(sock, socket_options)
76
77 if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
78 sock.settimeout(timeout)
79 if source_address:
80 sock.bind(source_address)
81 sock.connect(sa)
82 return sock
83
84 except socket.error as e:
85 err = e
86 if sock is not None:
87 sock.close()
88 sock = None
89
90 if err is not None:
91 raise err
92
93 raise socket.error("getaddrinfo returns an empty list")
94
95
96 def _set_socket_options(sock, options):
97 if options is None:
98 return
99
100 for opt in options:
101 sock.setsockopt(*opt)
102
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/urllib3/util/connection.py b/urllib3/util/connection.py
--- a/urllib3/util/connection.py
+++ b/urllib3/util/connection.py
@@ -46,6 +46,8 @@
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
+# One additional modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
@@ -64,14 +66,19 @@
if host.startswith('['):
host = host.strip('[]')
err = None
- for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
+
+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+ # The original create_connection function always returns all records.
+ family = allowed_gai_family()
+
+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
- # This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
@@ -99,3 +106,39 @@
for opt in options:
sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+ """This function is designed to work in the context of
+ getaddrinfo, where family=socket.AF_UNSPEC is the default and
+ will perform a DNS search for both IPv6 and IPv4 records."""
+
+ family = socket.AF_INET
+ if HAS_IPV6:
+ family = socket.AF_UNSPEC
+ return family
+
+
+def _has_ipv6(host):
+ """ Returns True if the system can bind an IPv6 address. """
+ sock = None
+ has_ipv6 = False
+
+ if socket.has_ipv6:
+ # has_ipv6 returns true if cPython was compiled with IPv6 support.
+ # It does not tell us if the system has IPv6 support enabled. To
+ # determine that we must bind to an IPv6 address.
+ # https://github.com/shazow/urllib3/pull/611
+ # https://bugs.python.org/issue658327
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind((host, 0))
+ has_ipv6 = True
+ except Exception:
+ pass
+
+ if sock:
+ sock.close()
+ return has_ipv6
+
+HAS_IPV6 = _has_ipv6('::1')
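The patch above hinges on a one-time probe: at import time it checks whether the machine can bind an IPv6 socket at all, and only then lets `getaddrinfo` return IPv6 records. The following is a hedged, standalone sketch of that idea; it uses Python 3 idioms and a `localhost` lookup purely for illustration and is not part of the upstream change.

```
# Illustrative sketch of the IPv6 probe and family selection from the patch.
# Output depends on the local network stack; names here are for the demo only.
import socket


def probe_ipv6(host="::1"):
    """Return True if this system can actually bind an IPv6 socket."""
    if not socket.has_ipv6:
        # CPython built without IPv6 support at all.
        return False
    try:
        # Binding the IPv6 loopback proves the kernel has IPv6 enabled,
        # which socket.has_ipv6 alone does not guarantee.
        with socket.socket(socket.AF_INET6) as sock:
            sock.bind((host, 0))
        return True
    except OSError:
        return False


def family_for_getaddrinfo():
    """AF_UNSPEC asks for both A and AAAA records; AF_INET restricts to IPv4."""
    return socket.AF_UNSPEC if probe_ipv6() else socket.AF_INET


if __name__ == "__main__":
    family = family_for_getaddrinfo()
    print("IPv6 usable:", family == socket.AF_UNSPEC)
    # With AF_INET, getaddrinfo can no longer return IPv6 rows, so a connect
    # loop like create_connection() never touches an unsupported family.
    for af, *_ in socket.getaddrinfo("localhost", 80, family, socket.SOCK_STREAM):
        print("candidate family:", af)
```

The real patch avoids `with` blocks and catches a broad `Exception` so it still runs on Python 2; the sketch trades that compatibility for brevity.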
| {"golden_diff": "diff --git a/urllib3/util/connection.py b/urllib3/util/connection.py\n--- a/urllib3/util/connection.py\n+++ b/urllib3/util/connection.py\n@@ -46,6 +46,8 @@\n \n # This function is copied from socket.py in the Python 2.7 standard\n # library test suite. Added to its signature is only `socket_options`.\n+# One additional modification is that we avoid binding to IPv6 servers\n+# discovered in DNS if the system doesn't have IPv6 functionality.\n def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\n source_address=None, socket_options=None):\n \"\"\"Connect to *address* and return the socket object.\n@@ -64,14 +66,19 @@\n if host.startswith('['):\n host = host.strip('[]')\n err = None\n- for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):\n+\n+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets\n+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.\n+ # The original create_connection function always returns all records.\n+ family = allowed_gai_family()\n+\n+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n sock = None\n try:\n sock = socket.socket(af, socktype, proto)\n \n # If provided, set socket level options before connecting.\n- # This is the only addition urllib3 makes to this function.\n _set_socket_options(sock, socket_options)\n \n if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:\n@@ -99,3 +106,39 @@\n \n for opt in options:\n sock.setsockopt(*opt)\n+\n+\n+def allowed_gai_family():\n+ \"\"\"This function is designed to work in the context of\n+ getaddrinfo, where family=socket.AF_UNSPEC is the default and\n+ will perform a DNS search for both IPv6 and IPv4 records.\"\"\"\n+\n+ family = socket.AF_INET\n+ if HAS_IPV6:\n+ family = socket.AF_UNSPEC\n+ return family\n+\n+\n+def _has_ipv6(host):\n+ \"\"\" Returns True if the system can bind an IPv6 address. \"\"\"\n+ sock = None\n+ has_ipv6 = False\n+\n+ if socket.has_ipv6:\n+ # has_ipv6 returns true if cPython was compiled with IPv6 support.\n+ # It does not tell us if the system has IPv6 support enabled. To\n+ # determine that we must bind to an IPv6 address.\n+ # https://github.com/shazow/urllib3/pull/611\n+ # https://bugs.python.org/issue658327\n+ try:\n+ sock = socket.socket(socket.AF_INET6)\n+ sock.bind((host, 0))\n+ has_ipv6 = True\n+ except Exception:\n+ pass\n+\n+ if sock:\n+ sock.close()\n+ return has_ipv6\n+\n+HAS_IPV6 = _has_ipv6('::1')\n", "issue": "urllib3 attempts to use IPv6 even when IPv6 is disabled\nThis is an issue when running on a server without IPv6 (must be disabled because the network does not support it). 
Example when connecting to https://graph.facebook.com using requests and IPv4 happens to fail:\n\n```\nHTTPSConnectionPool(host='graph.facebook.com', port=443): Max retries exceeded with url: /v2.5/me/feed (Caused by NewConnectionError('<requests.packages.urllib3.connection.VerifiedHTTPSConnection object at 0x7f4dbd158518>: Failed to establish a new connection: [Errno 97] Address family not supported by protocol',))\nTraceback (most recent call last):\n File \"/home/lib/python3.4/site-packages/requests/packages/urllib3/connection.py\", line 137, in _new_conn\n (self.host, self.port), self.timeout, **extra_kw)\n File \"/home/lib/python3.4/site-packages/requests/packages/urllib3/util/connection.py\", line 91, in create_connection\n raise err\n File \"/home/lib/python3.4/site-packages/requests/packages/urllib3/util/connection.py\", line 71, in create_connection\n sock = socket.socket(af, socktype, proto)\n File \"/usr/lib/python3.4/socket.py\", line 126, in __init__\n _socket.socket.__init__(self, family, type, proto, fileno)\nOSError: [Errno 97] Address family not supported by protocol\n```\n\nurllib3 should throw an exception after exhausting all IPv4 options instead of trying (and invariably failing) IPv6.\n\nSee closed issue https://github.com/kennethreitz/requests/issues/3084.\n\n", "before_files": [{"content": "from __future__ import absolute_import\nimport socket\ntry:\n from select import poll, POLLIN\nexcept ImportError: # `poll` doesn't exist on OSX and other platforms\n poll = False\n try:\n from select import select\n except ImportError: # `select` doesn't exist on AppEngine.\n select = False\n\n\ndef is_connection_dropped(conn): # Platform-specific\n \"\"\"\n Returns True if the connection is dropped and should be closed.\n\n :param conn:\n :class:`httplib.HTTPConnection` object.\n\n Note: For platforms like AppEngine, this will always return ``False`` to\n let the platform handle connection recycling transparently for us.\n \"\"\"\n sock = getattr(conn, 'sock', False)\n if sock is False: # Platform-specific: AppEngine\n return False\n if sock is None: # Connection already closed (such as by httplib).\n return True\n\n if not poll:\n if not select: # Platform-specific: AppEngine\n return False\n\n try:\n return select([sock], [], [], 0.0)[0]\n except socket.error:\n return True\n\n # This version is better on platforms that support it.\n p = poll()\n p.register(sock, POLLIN)\n for (fno, ev) in p.poll(0.0):\n if fno == sock.fileno():\n # Either data is buffered (bad), or the connection is dropped.\n return True\n\n\n# This function is copied from socket.py in the Python 2.7 standard\n# library test suite. Added to its signature is only `socket_options`.\ndef create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\n source_address=None, socket_options=None):\n \"\"\"Connect to *address* and return the socket object.\n\n Convenience function. Connect to *address* (a 2-tuple ``(host,\n port)``) and return the socket object. Passing the optional\n *timeout* parameter will set the timeout on the socket instance\n before attempting to connect. If no *timeout* is supplied, the\n global default timeout setting returned by :func:`getdefaulttimeout`\n is used. 
If *source_address* is set it must be a tuple of (host, port)\n for the socket to bind as a source address before making the connection.\n An host of '' or port 0 tells the OS to use the default.\n \"\"\"\n\n host, port = address\n if host.startswith('['):\n host = host.strip('[]')\n err = None\n for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n sock = None\n try:\n sock = socket.socket(af, socktype, proto)\n\n # If provided, set socket level options before connecting.\n # This is the only addition urllib3 makes to this function.\n _set_socket_options(sock, socket_options)\n\n if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(timeout)\n if source_address:\n sock.bind(source_address)\n sock.connect(sa)\n return sock\n\n except socket.error as e:\n err = e\n if sock is not None:\n sock.close()\n sock = None\n\n if err is not None:\n raise err\n\n raise socket.error(\"getaddrinfo returns an empty list\")\n\n\ndef _set_socket_options(sock, options):\n if options is None:\n return\n\n for opt in options:\n sock.setsockopt(*opt)\n", "path": "urllib3/util/connection.py"}], "after_files": [{"content": "from __future__ import absolute_import\nimport socket\ntry:\n from select import poll, POLLIN\nexcept ImportError: # `poll` doesn't exist on OSX and other platforms\n poll = False\n try:\n from select import select\n except ImportError: # `select` doesn't exist on AppEngine.\n select = False\n\n\ndef is_connection_dropped(conn): # Platform-specific\n \"\"\"\n Returns True if the connection is dropped and should be closed.\n\n :param conn:\n :class:`httplib.HTTPConnection` object.\n\n Note: For platforms like AppEngine, this will always return ``False`` to\n let the platform handle connection recycling transparently for us.\n \"\"\"\n sock = getattr(conn, 'sock', False)\n if sock is False: # Platform-specific: AppEngine\n return False\n if sock is None: # Connection already closed (such as by httplib).\n return True\n\n if not poll:\n if not select: # Platform-specific: AppEngine\n return False\n\n try:\n return select([sock], [], [], 0.0)[0]\n except socket.error:\n return True\n\n # This version is better on platforms that support it.\n p = poll()\n p.register(sock, POLLIN)\n for (fno, ev) in p.poll(0.0):\n if fno == sock.fileno():\n # Either data is buffered (bad), or the connection is dropped.\n return True\n\n\n# This function is copied from socket.py in the Python 2.7 standard\n# library test suite. Added to its signature is only `socket_options`.\n# One additional modification is that we avoid binding to IPv6 servers\n# discovered in DNS if the system doesn't have IPv6 functionality.\ndef create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,\n source_address=None, socket_options=None):\n \"\"\"Connect to *address* and return the socket object.\n\n Convenience function. Connect to *address* (a 2-tuple ``(host,\n port)``) and return the socket object. Passing the optional\n *timeout* parameter will set the timeout on the socket instance\n before attempting to connect. If no *timeout* is supplied, the\n global default timeout setting returned by :func:`getdefaulttimeout`\n is used. 
If *source_address* is set it must be a tuple of (host, port)\n for the socket to bind as a source address before making the connection.\n An host of '' or port 0 tells the OS to use the default.\n \"\"\"\n\n host, port = address\n if host.startswith('['):\n host = host.strip('[]')\n err = None\n\n # Using the value from allowed_gai_family() in the context of getaddrinfo lets\n # us select whether to work with IPv4 DNS records, IPv6 records, or both.\n # The original create_connection function always returns all records.\n family = allowed_gai_family()\n\n for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):\n af, socktype, proto, canonname, sa = res\n sock = None\n try:\n sock = socket.socket(af, socktype, proto)\n\n # If provided, set socket level options before connecting.\n _set_socket_options(sock, socket_options)\n\n if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:\n sock.settimeout(timeout)\n if source_address:\n sock.bind(source_address)\n sock.connect(sa)\n return sock\n\n except socket.error as e:\n err = e\n if sock is not None:\n sock.close()\n sock = None\n\n if err is not None:\n raise err\n\n raise socket.error(\"getaddrinfo returns an empty list\")\n\n\ndef _set_socket_options(sock, options):\n if options is None:\n return\n\n for opt in options:\n sock.setsockopt(*opt)\n\n\ndef allowed_gai_family():\n \"\"\"This function is designed to work in the context of\n getaddrinfo, where family=socket.AF_UNSPEC is the default and\n will perform a DNS search for both IPv6 and IPv4 records.\"\"\"\n\n family = socket.AF_INET\n if HAS_IPV6:\n family = socket.AF_UNSPEC\n return family\n\n\ndef _has_ipv6(host):\n \"\"\" Returns True if the system can bind an IPv6 address. \"\"\"\n sock = None\n has_ipv6 = False\n\n if socket.has_ipv6:\n # has_ipv6 returns true if cPython was compiled with IPv6 support.\n # It does not tell us if the system has IPv6 support enabled. To\n # determine that we must bind to an IPv6 address.\n # https://github.com/shazow/urllib3/pull/611\n # https://bugs.python.org/issue658327\n try:\n sock = socket.socket(socket.AF_INET6)\n sock.bind((host, 0))\n has_ipv6 = True\n except Exception:\n pass\n\n if sock:\n sock.close()\n return has_ipv6\n\nHAS_IPV6 = _has_ipv6('::1')\n", "path": "urllib3/util/connection.py"}]} | 1,620 | 700 |
gh_patches_debug_57104 | rasdani/github-patches | git_diff | pyro-ppl__pyro-1704 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
make test: no attribute 'optim' error in 'examples/contrib/oed/ab_test.py'
### Issue Description
On the latest dev branch, `make test` gives the following error:
_
examples/contrib/oed/ab_test.py:12: in <module>
from gp_bayes_opt import GPBayesOptimizer
examples/contrib/oed/gp_bayes_opt.py:11: in <module>
class GPBayesOptimizer(pyro.optim.multi.MultiOptimizer):
E AttributeError: module 'pyro' has no attribute 'optim'
### Environment
For any bugs, please provide the following:
- OS and python version: CentOS Linux 7 (Core); Python 3.7.1
- PyTorch version, or if relevant, output of `pip freeze`: PyTorch 1.0.0
- Pyro version: output of `python -c 'import pyro; print pyro.__version__'`: pyro 0.3.0+9adbdb7
### Code Snippet
```
make install
make format
make test
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/contrib/oed/gp_bayes_opt.py`
Content:
```
1 import torch
2 import torch.autograd as autograd
3 import torch.optim as optim
4 from torch.distributions import transform_to
5
6 import pyro
7 import pyro.contrib.gp as gp
8 from pyro.infer import TraceEnum_ELBO
9
10
11 class GPBayesOptimizer(pyro.optim.multi.MultiOptimizer):
12 """Performs Bayesian Optimization using a Gaussian Process as an
13 emulator for the unknown function.
14 """
15
16 def __init__(self, constraints, gpmodel, num_acquisitions, acquisition_func=None):
17 """
18 :param torch.constraint constraints: constraints defining the domain of `f`
19 :param gp.models.GPRegression gpmodel: a (possibly initialized) GP
20 regression model. The kernel, etc is specified via `gpmodel`.
21 :param int num_acquisitions: number of points to acquire at each step
22 :param function acquisition_func: a function to generate acquisitions.
23 It should return a torch.Tensor of new points to query.
24 """
25 if acquisition_func is None:
26 acquisition_func = self.acquire_thompson
27
28 self.constraints = constraints
29 self.gpmodel = gpmodel
30 self.num_acquisitions = num_acquisitions
31 self.acquisition_func = acquisition_func
32
33 def update_posterior(self, X, y):
34 X = torch.cat([self.gpmodel.X, X])
35 y = torch.cat([self.gpmodel.y, y])
36 self.gpmodel.set_data(X, y)
37 optimizer = torch.optim.Adam(self.gpmodel.parameters(), lr=0.001)
38 gp.util.train(self.gpmodel, optimizer,
39 loss_fn=TraceEnum_ELBO(strict_enumeration_warning=False).differentiable_loss,
40 retain_graph=True)
41
42 def find_a_candidate(self, differentiable, x_init):
43 """Given a starting point, `x_init`, takes one LBFGS step
44 to optimize the differentiable function.
45
46 :param function differentiable: a function amenable to torch
47 autograd
48 :param torch.Tensor x_init: the initial point
49
50 """
51 # transform x to an unconstrained domain
52 unconstrained_x_init = transform_to(self.constraints).inv(x_init)
53 unconstrained_x = unconstrained_x_init.detach().clone().requires_grad_(True)
54 # TODO: Use LBFGS with line search by pytorch #8824 merged
55 minimizer = optim.LBFGS([unconstrained_x], max_eval=20)
56
57 def closure():
58 minimizer.zero_grad()
59 if (torch.log(torch.abs(unconstrained_x)) > 25.).any():
60 return torch.tensor(float('inf'))
61 x = transform_to(self.constraints)(unconstrained_x)
62 y = differentiable(x)
63 autograd.backward(unconstrained_x,
64 autograd.grad(y, unconstrained_x, retain_graph=True))
65 return y
66
67 minimizer.step(closure)
68 # after finding a candidate in the unconstrained domain,
69 # convert it back to original domain.
70 x = transform_to(self.constraints)(unconstrained_x)
71 opt_y = differentiable(x)
72 return x.detach(), opt_y.detach()
73
74 def opt_differentiable(self, differentiable, num_candidates=5):
75 """Optimizes a differentiable function by choosing `num_candidates`
76 initial points at random and calling :func:`find_a_candidate` on
77 each. The best candidate is returned with its function value.
78
79 :param function differentiable: a function amenable to torch autograd
80 :param int num_candidates: the number of random starting points to
81 use
82 :return: the minimiser and its function value
83 :rtype: tuple
84 """
85
86 candidates = []
87 values = []
88 for j in range(num_candidates):
89 x_init = self.gpmodel.X.new_empty(1).uniform_(
90 self.constraints.lower_bound, self.constraints.upper_bound)
91 x, y = self.find_a_candidate(differentiable, x_init)
92 if torch.isnan(y):
93 continue
94 candidates.append(x)
95 values.append(y)
96
97 mvalue, argmin = torch.min(torch.cat(values), dim=0)
98 return candidates[argmin.item()], mvalue
99
100 def acquire_thompson(self, num_acquisitions=1, **opt_params):
101 """Selects `num_acquisitions` query points at which to query the
102 original function by Thompson sampling.
103
104 :param int num_acquisitions: the number of points to generate
105 :param dict opt_params: additional parameters for optimization
106 routines
107 :return: a tensor of points to evaluate `loss` at
108 :rtype: torch.Tensor
109 """
110
111 # Initialize the return tensor
112 X = self.gpmodel.X.new_empty(num_acquisitions, *self.gpmodel.X.shape[1:])
113
114 for i in range(num_acquisitions):
115 sampler = self.gpmodel.iter_sample(noiseless=False)
116 x, _ = self.opt_differentiable(sampler, **opt_params)
117 X[i, ...] = x
118
119 return X
120
121 def get_step(self, loss, params, verbose=False):
122 X = self.acquisition_func(num_acquisitions=self.num_acquisitions)
123 y = loss(X)
124 if verbose:
125 print("Acquire at: X")
126 print(X)
127 print("y")
128 print(y)
129 self.update_posterior(X, y)
130 return self.opt_differentiable(lambda x: self.gpmodel(x)[0])
131
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/contrib/oed/gp_bayes_opt.py b/examples/contrib/oed/gp_bayes_opt.py
--- a/examples/contrib/oed/gp_bayes_opt.py
+++ b/examples/contrib/oed/gp_bayes_opt.py
@@ -3,9 +3,9 @@
import torch.optim as optim
from torch.distributions import transform_to
-import pyro
import pyro.contrib.gp as gp
from pyro.infer import TraceEnum_ELBO
+import pyro.optim
class GPBayesOptimizer(pyro.optim.multi.MultiOptimizer):
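The one-line import change works because of how Python binds submodules: `import pyro` only guarantees the top-level package, while the base-class lookup `pyro.optim.multi.MultiOptimizer` is resolved as soon as the `class` statement executes, so `pyro.optim` must have been imported explicitly by then. A rough, framework-free illustration of that rule with a throwaway package (the `pkg`/`sub` names are made up and are not part of Pyro):

```
# Hypothetical demo: "import pkg" does not expose "pkg.sub" unless the
# submodule is imported explicitly (or by pkg/__init__.py).
import importlib
import sys
import tempfile
from pathlib import Path

root = Path(tempfile.mkdtemp())
(root / "pkg").mkdir()
(root / "pkg" / "__init__.py").write_text("")          # does NOT import .sub
(root / "pkg" / "sub.py").write_text("VALUE = 42\n")
sys.path.insert(0, str(root))

pkg = importlib.import_module("pkg")
print(hasattr(pkg, "sub"))           # False: attribute not bound yet

importlib.import_module("pkg.sub")   # analogous to "import pyro.optim"
print(pkg.sub.VALUE)                 # 42: submodule now bound on the package
```

That is also why the traceback in the issue points at the class-definition line in `gp_bayes_opt.py` rather than at any call site.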
| {"golden_diff": "diff --git a/examples/contrib/oed/gp_bayes_opt.py b/examples/contrib/oed/gp_bayes_opt.py\n--- a/examples/contrib/oed/gp_bayes_opt.py\n+++ b/examples/contrib/oed/gp_bayes_opt.py\n@@ -3,9 +3,9 @@\n import torch.optim as optim\n from torch.distributions import transform_to\n \n-import pyro\n import pyro.contrib.gp as gp\n from pyro.infer import TraceEnum_ELBO\n+import pyro.optim\n \n \n class GPBayesOptimizer(pyro.optim.multi.MultiOptimizer):\n", "issue": "make test: no attribute 'optim' error in 'examples/contrib/oed/ab_test.py' \n### Issue Description\r\nOn the latest dev branch, `make test` gives the following error:\r\n\r\n_\r\nexamples/contrib/oed/ab_test.py:12: in <module>\r\n from gp_bayes_opt import GPBayesOptimizer\r\nexamples/contrib/oed/gp_bayes_opt.py:11: in <module>\r\n class GPBayesOptimizer(pyro.optim.multi.MultiOptimizer):\r\nE AttributeError: module 'pyro' has no attribute 'optim'\r\n\r\n### Environment\r\nFor any bugs, please provide the following:\r\n - OS and python version: CentOS Linux 7 (Core); Python 3.7.1\r\n - PyTorch version, or if relevant, output of `pip freeze`: PyTorch 1.0.0\r\n - Pyro version: output of `python -c 'import pyro; print pyro.__version__'`: pyro 0.3.0+9adbdb7\r\n\r\n### Code Snippet\r\n\r\n```\r\nmake install\r\nmake format\r\nmake test\r\n```\r\n\n", "before_files": [{"content": "import torch\nimport torch.autograd as autograd\nimport torch.optim as optim\nfrom torch.distributions import transform_to\n\nimport pyro\nimport pyro.contrib.gp as gp\nfrom pyro.infer import TraceEnum_ELBO\n\n\nclass GPBayesOptimizer(pyro.optim.multi.MultiOptimizer):\n \"\"\"Performs Bayesian Optimization using a Gaussian Process as an\n emulator for the unknown function.\n \"\"\"\n\n def __init__(self, constraints, gpmodel, num_acquisitions, acquisition_func=None):\n \"\"\"\n :param torch.constraint constraints: constraints defining the domain of `f`\n :param gp.models.GPRegression gpmodel: a (possibly initialized) GP\n regression model. 
The kernel, etc is specified via `gpmodel`.\n :param int num_acquisitions: number of points to acquire at each step\n :param function acquisition_func: a function to generate acquisitions.\n It should return a torch.Tensor of new points to query.\n \"\"\"\n if acquisition_func is None:\n acquisition_func = self.acquire_thompson\n\n self.constraints = constraints\n self.gpmodel = gpmodel\n self.num_acquisitions = num_acquisitions\n self.acquisition_func = acquisition_func\n\n def update_posterior(self, X, y):\n X = torch.cat([self.gpmodel.X, X])\n y = torch.cat([self.gpmodel.y, y])\n self.gpmodel.set_data(X, y)\n optimizer = torch.optim.Adam(self.gpmodel.parameters(), lr=0.001)\n gp.util.train(self.gpmodel, optimizer,\n loss_fn=TraceEnum_ELBO(strict_enumeration_warning=False).differentiable_loss,\n retain_graph=True)\n\n def find_a_candidate(self, differentiable, x_init):\n \"\"\"Given a starting point, `x_init`, takes one LBFGS step\n to optimize the differentiable function.\n\n :param function differentiable: a function amenable to torch\n autograd\n :param torch.Tensor x_init: the initial point\n\n \"\"\"\n # transform x to an unconstrained domain\n unconstrained_x_init = transform_to(self.constraints).inv(x_init)\n unconstrained_x = unconstrained_x_init.detach().clone().requires_grad_(True)\n # TODO: Use LBFGS with line search by pytorch #8824 merged\n minimizer = optim.LBFGS([unconstrained_x], max_eval=20)\n\n def closure():\n minimizer.zero_grad()\n if (torch.log(torch.abs(unconstrained_x)) > 25.).any():\n return torch.tensor(float('inf'))\n x = transform_to(self.constraints)(unconstrained_x)\n y = differentiable(x)\n autograd.backward(unconstrained_x,\n autograd.grad(y, unconstrained_x, retain_graph=True))\n return y\n\n minimizer.step(closure)\n # after finding a candidate in the unconstrained domain,\n # convert it back to original domain.\n x = transform_to(self.constraints)(unconstrained_x)\n opt_y = differentiable(x)\n return x.detach(), opt_y.detach()\n\n def opt_differentiable(self, differentiable, num_candidates=5):\n \"\"\"Optimizes a differentiable function by choosing `num_candidates`\n initial points at random and calling :func:`find_a_candidate` on\n each. 
The best candidate is returned with its function value.\n\n :param function differentiable: a function amenable to torch autograd\n :param int num_candidates: the number of random starting points to\n use\n :return: the minimiser and its function value\n :rtype: tuple\n \"\"\"\n\n candidates = []\n values = []\n for j in range(num_candidates):\n x_init = self.gpmodel.X.new_empty(1).uniform_(\n self.constraints.lower_bound, self.constraints.upper_bound)\n x, y = self.find_a_candidate(differentiable, x_init)\n if torch.isnan(y):\n continue\n candidates.append(x)\n values.append(y)\n\n mvalue, argmin = torch.min(torch.cat(values), dim=0)\n return candidates[argmin.item()], mvalue\n\n def acquire_thompson(self, num_acquisitions=1, **opt_params):\n \"\"\"Selects `num_acquisitions` query points at which to query the\n original function by Thompson sampling.\n\n :param int num_acquisitions: the number of points to generate\n :param dict opt_params: additional parameters for optimization\n routines\n :return: a tensor of points to evaluate `loss` at\n :rtype: torch.Tensor\n \"\"\"\n\n # Initialize the return tensor\n X = self.gpmodel.X.new_empty(num_acquisitions, *self.gpmodel.X.shape[1:])\n\n for i in range(num_acquisitions):\n sampler = self.gpmodel.iter_sample(noiseless=False)\n x, _ = self.opt_differentiable(sampler, **opt_params)\n X[i, ...] = x\n\n return X\n\n def get_step(self, loss, params, verbose=False):\n X = self.acquisition_func(num_acquisitions=self.num_acquisitions)\n y = loss(X)\n if verbose:\n print(\"Acquire at: X\")\n print(X)\n print(\"y\")\n print(y)\n self.update_posterior(X, y)\n return self.opt_differentiable(lambda x: self.gpmodel(x)[0])\n", "path": "examples/contrib/oed/gp_bayes_opt.py"}], "after_files": [{"content": "import torch\nimport torch.autograd as autograd\nimport torch.optim as optim\nfrom torch.distributions import transform_to\n\nimport pyro.contrib.gp as gp\nfrom pyro.infer import TraceEnum_ELBO\nimport pyro.optim\n\n\nclass GPBayesOptimizer(pyro.optim.multi.MultiOptimizer):\n \"\"\"Performs Bayesian Optimization using a Gaussian Process as an\n emulator for the unknown function.\n \"\"\"\n\n def __init__(self, constraints, gpmodel, num_acquisitions, acquisition_func=None):\n \"\"\"\n :param torch.constraint constraints: constraints defining the domain of `f`\n :param gp.models.GPRegression gpmodel: a (possibly initialized) GP\n regression model. 
The kernel, etc is specified via `gpmodel`.\n :param int num_acquisitions: number of points to acquire at each step\n :param function acquisition_func: a function to generate acquisitions.\n It should return a torch.Tensor of new points to query.\n \"\"\"\n if acquisition_func is None:\n acquisition_func = self.acquire_thompson\n\n self.constraints = constraints\n self.gpmodel = gpmodel\n self.num_acquisitions = num_acquisitions\n self.acquisition_func = acquisition_func\n\n def update_posterior(self, X, y):\n X = torch.cat([self.gpmodel.X, X])\n y = torch.cat([self.gpmodel.y, y])\n self.gpmodel.set_data(X, y)\n optimizer = torch.optim.Adam(self.gpmodel.parameters(), lr=0.001)\n gp.util.train(self.gpmodel, optimizer,\n loss_fn=TraceEnum_ELBO(strict_enumeration_warning=False).differentiable_loss,\n retain_graph=True)\n\n def find_a_candidate(self, differentiable, x_init):\n \"\"\"Given a starting point, `x_init`, takes one LBFGS step\n to optimize the differentiable function.\n\n :param function differentiable: a function amenable to torch\n autograd\n :param torch.Tensor x_init: the initial point\n\n \"\"\"\n # transform x to an unconstrained domain\n unconstrained_x_init = transform_to(self.constraints).inv(x_init)\n unconstrained_x = unconstrained_x_init.detach().clone().requires_grad_(True)\n # TODO: Use LBFGS with line search by pytorch #8824 merged\n minimizer = optim.LBFGS([unconstrained_x], max_eval=20)\n\n def closure():\n minimizer.zero_grad()\n if (torch.log(torch.abs(unconstrained_x)) > 25.).any():\n return torch.tensor(float('inf'))\n x = transform_to(self.constraints)(unconstrained_x)\n y = differentiable(x)\n autograd.backward(unconstrained_x,\n autograd.grad(y, unconstrained_x, retain_graph=True))\n return y\n\n minimizer.step(closure)\n # after finding a candidate in the unconstrained domain,\n # convert it back to original domain.\n x = transform_to(self.constraints)(unconstrained_x)\n opt_y = differentiable(x)\n return x.detach(), opt_y.detach()\n\n def opt_differentiable(self, differentiable, num_candidates=5):\n \"\"\"Optimizes a differentiable function by choosing `num_candidates`\n initial points at random and calling :func:`find_a_candidate` on\n each. 
The best candidate is returned with its function value.\n\n :param function differentiable: a function amenable to torch autograd\n :param int num_candidates: the number of random starting points to\n use\n :return: the minimiser and its function value\n :rtype: tuple\n \"\"\"\n\n candidates = []\n values = []\n for j in range(num_candidates):\n x_init = self.gpmodel.X.new_empty(1).uniform_(\n self.constraints.lower_bound, self.constraints.upper_bound)\n x, y = self.find_a_candidate(differentiable, x_init)\n if torch.isnan(y):\n continue\n candidates.append(x)\n values.append(y)\n\n mvalue, argmin = torch.min(torch.cat(values), dim=0)\n return candidates[argmin.item()], mvalue\n\n def acquire_thompson(self, num_acquisitions=1, **opt_params):\n \"\"\"Selects `num_acquisitions` query points at which to query the\n original function by Thompson sampling.\n\n :param int num_acquisitions: the number of points to generate\n :param dict opt_params: additional parameters for optimization\n routines\n :return: a tensor of points to evaluate `loss` at\n :rtype: torch.Tensor\n \"\"\"\n\n # Initialize the return tensor\n X = self.gpmodel.X.new_empty(num_acquisitions, *self.gpmodel.X.shape[1:])\n\n for i in range(num_acquisitions):\n sampler = self.gpmodel.iter_sample(noiseless=False)\n x, _ = self.opt_differentiable(sampler, **opt_params)\n X[i, ...] = x\n\n return X\n\n def get_step(self, loss, params, verbose=False):\n X = self.acquisition_func(num_acquisitions=self.num_acquisitions)\n y = loss(X)\n if verbose:\n print(\"Acquire at: X\")\n print(X)\n print(\"y\")\n print(y)\n self.update_posterior(X, y)\n return self.opt_differentiable(lambda x: self.gpmodel(x)[0])\n", "path": "examples/contrib/oed/gp_bayes_opt.py"}]} | 1,942 | 127 |
gh_patches_debug_23563 | rasdani/github-patches | git_diff | getsentry__snuba-558 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support Redis Authentication
I'm trying to install Snuba on my Kubernetes instance alongside Sentry.
Sentry's Helm chart installs Redis with a password (It generates a secret), and there was no option for me to specify that password for Snuba.
I opened up the source code and it looks like a simple solution:
Another setting (REDIS_PASSWORD) that would be passed to startup_nodes and to StrictRedis' constructor on the snuba/redis.py module.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snuba/settings_docker.py`
Content:
```
1 import os
2 from snuba.settings_base import *
3
4 env = os.environ.get
5
6 DEBUG = env('DEBUG', '0').lower() in ('1', 'true')
7
8 DEFAULT_BROKERS = env('DEFAULT_BROKERS', 'localhost:9092').split(',')
9
10 REDIS_HOST = env('REDIS_HOST', 'localhost')
11 REDIS_PORT = int(env('REDIS_PORT', 6379))
12 REDIS_DB = int(env('REDIS_DB', 1))
13 USE_REDIS_CLUSTER = False
14
```
Path: `snuba/settings_base.py`
Content:
```
1 import os
2
3 LOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO')
4
5 TESTING = False
6 DEBUG = True
7
8 PORT = 1218
9
10 DEFAULT_DATASET_NAME = 'events'
11 DISABLED_DATASETS = {}
12 DATASET_MODE = 'local'
13
14 # Clickhouse Options
15 # TODO: Warn about using `CLICKHOUSE_SERVER`, users should use the new settings instead.
16 [default_clickhouse_host, default_clickhouse_port] = os.environ.get('CLICKHOUSE_SERVER', 'localhost:9000').split(':', 1)
17 CLICKHOUSE_HOST = os.environ.get('CLICKHOUSE_HOST', default_clickhouse_host)
18 CLICKHOUSE_PORT = int(os.environ.get('CLICKHOUSE_PORT', default_clickhouse_port))
19 CLICKHOUSE_HTTP_PORT = int(os.environ.get('CLICKHOUSE_HTTP_PORT', 8123))
20 CLICKHOUSE_MAX_POOL_SIZE = 25
21
22 # Dogstatsd Options
23 DOGSTATSD_HOST = 'localhost'
24 DOGSTATSD_PORT = 8125
25
26 # Redis Options
27 USE_REDIS_CLUSTER = False
28 REDIS_CLUSTER_STARTUP_NODES = None
29 REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
30 REDIS_PORT = 6379
31 REDIS_DB = 1
32
33 # Query Recording Options
34 RECORD_QUERIES = False
35 QUERIES_TOPIC = 'snuba-queries'
36
37 # Runtime Config Options
38 CONFIG_MEMOIZE_TIMEOUT = 10
39
40 # Sentry Options
41 SENTRY_DSN = None
42
43 # Snuba Options
44
45 SNAPSHOT_LOAD_PRODUCT = 'snuba'
46
47 SNAPSHOT_CONTROL_TOPIC_INIT_TIMEOUT = 30
48 BULK_CLICKHOUSE_BUFFER = 10000
49
50 # Processor/Writer Options
51 DEFAULT_BROKERS = ['localhost:9092']
52 DEFAULT_DATASET_BROKERS = {}
53
54 DEFAULT_MAX_BATCH_SIZE = 50000
55 DEFAULT_MAX_BATCH_TIME_MS = 2 * 1000
56 DEFAULT_QUEUED_MAX_MESSAGE_KBYTES = 10000
57 DEFAULT_QUEUED_MIN_MESSAGES = 10000
58 DISCARD_OLD_EVENTS = True
59
60 DEFAULT_RETENTION_DAYS = 90
61 RETENTION_OVERRIDES = {}
62
63 MAX_PREWHERE_CONDITIONS = 1
64
65 STATS_IN_RESPONSE = False
66
67 PAYLOAD_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%fZ"
68
69 REPLACER_MAX_BLOCK_SIZE = 512
70 REPLACER_MAX_MEMORY_USAGE = 10 * (1024**3) # 10GB
71 # TLL of Redis key that denotes whether a project had replacements
72 # run recently. Useful for decidig whether or not to add FINAL clause
73 # to queries.
74 REPLACER_KEY_TTL = 12 * 60 * 60
75 REPLACER_MAX_GROUP_IDS_TO_EXCLUDE = 256
76
77 TURBO_SAMPLE_RATE = 0.1
78
```
Path: `snuba/redis.py`
Content:
```
1 from __future__ import absolute_import
2
3 from redis.client import StrictRedis
4 from redis.exceptions import BusyLoadingError, ConnectionError
5 from rediscluster import StrictRedisCluster
6
7 from snuba import settings
8
9
10 class RetryingStrictRedisCluster(StrictRedisCluster):
11 """
12 Execute a command with cluster reinitialization retry logic.
13 Should a cluster respond with a ConnectionError or BusyLoadingError the
14 cluster nodes list will be reinitialized and the command will be executed
15 again with the most up to date view of the world.
16 """
17
18 def execute_command(self, *args, **kwargs):
19 try:
20 return super(self.__class__, self).execute_command(*args, **kwargs)
21 except (
22 ConnectionError,
23 BusyLoadingError,
24 KeyError # see: https://github.com/Grokzen/redis-py-cluster/issues/287
25 ):
26 self.connection_pool.nodes.reset()
27 return super(self.__class__, self).execute_command(*args, **kwargs)
28
29
30 if settings.USE_REDIS_CLUSTER:
31 startup_nodes = settings.REDIS_CLUSTER_STARTUP_NODES
32 if startup_nodes is None:
33 startup_nodes = [{
34 'host': settings.REDIS_HOST,
35 'port': settings.REDIS_PORT,
36 }]
37 redis_client = RetryingStrictRedisCluster(
38 startup_nodes=startup_nodes,
39 socket_keepalive=True,
40 )
41 else:
42 redis_client = StrictRedis(
43 host=settings.REDIS_HOST,
44 port=settings.REDIS_PORT,
45 db=settings.REDIS_DB,
46 socket_keepalive=True,
47 )
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snuba/redis.py b/snuba/redis.py
--- a/snuba/redis.py
+++ b/snuba/redis.py
@@ -37,11 +37,13 @@
redis_client = RetryingStrictRedisCluster(
startup_nodes=startup_nodes,
socket_keepalive=True,
+ password=settings.REDIS_PASSWORD,
)
else:
redis_client = StrictRedis(
host=settings.REDIS_HOST,
port=settings.REDIS_PORT,
+ password=settings.REDIS_PASSWORD,
db=settings.REDIS_DB,
socket_keepalive=True,
)
diff --git a/snuba/settings_base.py b/snuba/settings_base.py
--- a/snuba/settings_base.py
+++ b/snuba/settings_base.py
@@ -28,6 +28,7 @@
REDIS_CLUSTER_STARTUP_NODES = None
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = 6379
+REDIS_PASSWORD = None
REDIS_DB = 1
# Query Recording Options
diff --git a/snuba/settings_docker.py b/snuba/settings_docker.py
--- a/snuba/settings_docker.py
+++ b/snuba/settings_docker.py
@@ -9,5 +9,6 @@
REDIS_HOST = env('REDIS_HOST', 'localhost')
REDIS_PORT = int(env('REDIS_PORT', 6379))
+REDIS_PASSWORD = env('REDIS_PASSWORD')
REDIS_DB = int(env('REDIS_DB', 1))
USE_REDIS_CLUSTER = False
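The change threads one optional credential from the environment down to the Redis client constructors; redis-py sends no AUTH command when `password` is `None`, so deployments without a password keep their current behaviour. A hedged sketch of that flow, with a fake client standing in for `StrictRedis` so the snippet runs without a Redis server:

```
# Sketch of the configuration flow added by the patch. FakeStrictRedis is a
# stand-in for redis.StrictRedis; only the password plumbing is shown.
import os


class FakeStrictRedis:
    def __init__(self, host, port, db, password=None, socket_keepalive=True):
        self.auth = password          # None means "do not send AUTH"

    def ping(self):
        return "AUTH configured" if self.auth else "no AUTH"


REDIS_HOST = os.environ.get("REDIS_HOST", "localhost")
REDIS_PORT = int(os.environ.get("REDIS_PORT", 6379))
REDIS_PASSWORD = os.environ.get("REDIS_PASSWORD")   # stays None when unset
REDIS_DB = int(os.environ.get("REDIS_DB", 1))

client = FakeStrictRedis(
    host=REDIS_HOST,
    port=REDIS_PORT,
    password=REDIS_PASSWORD,
    db=REDIS_DB,
)
print(client.ping())
```

Setting `REDIS_PASSWORD` in the container environment (for example from the secret the Sentry Helm chart generates) is then enough to authenticate both the plain client and the cluster client.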
| {"golden_diff": "diff --git a/snuba/redis.py b/snuba/redis.py\n--- a/snuba/redis.py\n+++ b/snuba/redis.py\n@@ -37,11 +37,13 @@\n redis_client = RetryingStrictRedisCluster(\n startup_nodes=startup_nodes,\n socket_keepalive=True,\n+ password=settings.REDIS_PASSWORD,\n )\n else:\n redis_client = StrictRedis(\n host=settings.REDIS_HOST,\n port=settings.REDIS_PORT,\n+ password=settings.REDIS_PASSWORD,\n db=settings.REDIS_DB,\n socket_keepalive=True,\n )\ndiff --git a/snuba/settings_base.py b/snuba/settings_base.py\n--- a/snuba/settings_base.py\n+++ b/snuba/settings_base.py\n@@ -28,6 +28,7 @@\n REDIS_CLUSTER_STARTUP_NODES = None\n REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')\n REDIS_PORT = 6379\n+REDIS_PASSWORD = None\n REDIS_DB = 1\n \n # Query Recording Options\ndiff --git a/snuba/settings_docker.py b/snuba/settings_docker.py\n--- a/snuba/settings_docker.py\n+++ b/snuba/settings_docker.py\n@@ -9,5 +9,6 @@\n \n REDIS_HOST = env('REDIS_HOST', 'localhost')\n REDIS_PORT = int(env('REDIS_PORT', 6379))\n+REDIS_PASSWORD = env('REDIS_PASSWORD')\n REDIS_DB = int(env('REDIS_DB', 1))\n USE_REDIS_CLUSTER = False\n", "issue": "Support Redis Authentication\nI'm trying to install Snuba on my Kubernetes instance alongside Sentry.\r\nSentry's Helm chart installs Redis with a password (It generates a secret), and there was no option for me to specify that password for Snuba.\r\n\r\nI opened up the source code and it looks like a simple solution: \r\nAnother setting (REDIS_PASSWORD) that would be passed to startup_nodes and to StrictRedis' constructor on the snuba/redis.py module.\n", "before_files": [{"content": "import os\nfrom snuba.settings_base import *\n\nenv = os.environ.get\n\nDEBUG = env('DEBUG', '0').lower() in ('1', 'true')\n\nDEFAULT_BROKERS = env('DEFAULT_BROKERS', 'localhost:9092').split(',')\n\nREDIS_HOST = env('REDIS_HOST', 'localhost')\nREDIS_PORT = int(env('REDIS_PORT', 6379))\nREDIS_DB = int(env('REDIS_DB', 1))\nUSE_REDIS_CLUSTER = False\n", "path": "snuba/settings_docker.py"}, {"content": "import os\n\nLOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO')\n\nTESTING = False\nDEBUG = True\n\nPORT = 1218\n\nDEFAULT_DATASET_NAME = 'events'\nDISABLED_DATASETS = {}\nDATASET_MODE = 'local'\n\n# Clickhouse Options\n# TODO: Warn about using `CLICKHOUSE_SERVER`, users should use the new settings instead.\n[default_clickhouse_host, default_clickhouse_port] = os.environ.get('CLICKHOUSE_SERVER', 'localhost:9000').split(':', 1)\nCLICKHOUSE_HOST = os.environ.get('CLICKHOUSE_HOST', default_clickhouse_host)\nCLICKHOUSE_PORT = int(os.environ.get('CLICKHOUSE_PORT', default_clickhouse_port))\nCLICKHOUSE_HTTP_PORT = int(os.environ.get('CLICKHOUSE_HTTP_PORT', 8123))\nCLICKHOUSE_MAX_POOL_SIZE = 25\n\n# Dogstatsd Options\nDOGSTATSD_HOST = 'localhost'\nDOGSTATSD_PORT = 8125\n\n# Redis Options\nUSE_REDIS_CLUSTER = False\nREDIS_CLUSTER_STARTUP_NODES = None\nREDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')\nREDIS_PORT = 6379\nREDIS_DB = 1\n\n# Query Recording Options\nRECORD_QUERIES = False\nQUERIES_TOPIC = 'snuba-queries'\n\n# Runtime Config Options\nCONFIG_MEMOIZE_TIMEOUT = 10\n\n# Sentry Options\nSENTRY_DSN = None\n\n# Snuba Options\n\nSNAPSHOT_LOAD_PRODUCT = 'snuba'\n\nSNAPSHOT_CONTROL_TOPIC_INIT_TIMEOUT = 30\nBULK_CLICKHOUSE_BUFFER = 10000\n\n# Processor/Writer Options\nDEFAULT_BROKERS = ['localhost:9092']\nDEFAULT_DATASET_BROKERS = {}\n\nDEFAULT_MAX_BATCH_SIZE = 50000\nDEFAULT_MAX_BATCH_TIME_MS = 2 * 1000\nDEFAULT_QUEUED_MAX_MESSAGE_KBYTES = 10000\nDEFAULT_QUEUED_MIN_MESSAGES = 
10000\nDISCARD_OLD_EVENTS = True\n\nDEFAULT_RETENTION_DAYS = 90\nRETENTION_OVERRIDES = {}\n\nMAX_PREWHERE_CONDITIONS = 1\n\nSTATS_IN_RESPONSE = False\n\nPAYLOAD_DATETIME_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nREPLACER_MAX_BLOCK_SIZE = 512\nREPLACER_MAX_MEMORY_USAGE = 10 * (1024**3) # 10GB\n# TLL of Redis key that denotes whether a project had replacements\n# run recently. Useful for decidig whether or not to add FINAL clause\n# to queries.\nREPLACER_KEY_TTL = 12 * 60 * 60\nREPLACER_MAX_GROUP_IDS_TO_EXCLUDE = 256\n\nTURBO_SAMPLE_RATE = 0.1\n", "path": "snuba/settings_base.py"}, {"content": "from __future__ import absolute_import\n\nfrom redis.client import StrictRedis\nfrom redis.exceptions import BusyLoadingError, ConnectionError\nfrom rediscluster import StrictRedisCluster\n\nfrom snuba import settings\n\n\nclass RetryingStrictRedisCluster(StrictRedisCluster):\n \"\"\"\n Execute a command with cluster reinitialization retry logic.\n Should a cluster respond with a ConnectionError or BusyLoadingError the\n cluster nodes list will be reinitialized and the command will be executed\n again with the most up to date view of the world.\n \"\"\"\n\n def execute_command(self, *args, **kwargs):\n try:\n return super(self.__class__, self).execute_command(*args, **kwargs)\n except (\n ConnectionError,\n BusyLoadingError,\n KeyError # see: https://github.com/Grokzen/redis-py-cluster/issues/287\n ):\n self.connection_pool.nodes.reset()\n return super(self.__class__, self).execute_command(*args, **kwargs)\n\n\nif settings.USE_REDIS_CLUSTER:\n startup_nodes = settings.REDIS_CLUSTER_STARTUP_NODES\n if startup_nodes is None:\n startup_nodes = [{\n 'host': settings.REDIS_HOST,\n 'port': settings.REDIS_PORT,\n }]\n redis_client = RetryingStrictRedisCluster(\n startup_nodes=startup_nodes,\n socket_keepalive=True,\n )\nelse:\n redis_client = StrictRedis(\n host=settings.REDIS_HOST,\n port=settings.REDIS_PORT,\n db=settings.REDIS_DB,\n socket_keepalive=True,\n )\n", "path": "snuba/redis.py"}], "after_files": [{"content": "import os\nfrom snuba.settings_base import *\n\nenv = os.environ.get\n\nDEBUG = env('DEBUG', '0').lower() in ('1', 'true')\n\nDEFAULT_BROKERS = env('DEFAULT_BROKERS', 'localhost:9092').split(',')\n\nREDIS_HOST = env('REDIS_HOST', 'localhost')\nREDIS_PORT = int(env('REDIS_PORT', 6379))\nREDIS_PASSWORD = env('REDIS_PASSWORD')\nREDIS_DB = int(env('REDIS_DB', 1))\nUSE_REDIS_CLUSTER = False\n", "path": "snuba/settings_docker.py"}, {"content": "import os\n\nLOG_LEVEL = os.environ.get('LOG_LEVEL', 'INFO')\n\nTESTING = False\nDEBUG = True\n\nPORT = 1218\n\nDEFAULT_DATASET_NAME = 'events'\nDISABLED_DATASETS = {}\nDATASET_MODE = 'local'\n\n# Clickhouse Options\n# TODO: Warn about using `CLICKHOUSE_SERVER`, users should use the new settings instead.\n[default_clickhouse_host, default_clickhouse_port] = os.environ.get('CLICKHOUSE_SERVER', 'localhost:9000').split(':', 1)\nCLICKHOUSE_HOST = os.environ.get('CLICKHOUSE_HOST', default_clickhouse_host)\nCLICKHOUSE_PORT = int(os.environ.get('CLICKHOUSE_PORT', default_clickhouse_port))\nCLICKHOUSE_HTTP_PORT = int(os.environ.get('CLICKHOUSE_HTTP_PORT', 8123))\nCLICKHOUSE_MAX_POOL_SIZE = 25\n\n# Dogstatsd Options\nDOGSTATSD_HOST = 'localhost'\nDOGSTATSD_PORT = 8125\n\n# Redis Options\nUSE_REDIS_CLUSTER = False\nREDIS_CLUSTER_STARTUP_NODES = None\nREDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')\nREDIS_PORT = 6379\nREDIS_PASSWORD = None\nREDIS_DB = 1\n\n# Query Recording Options\nRECORD_QUERIES = False\nQUERIES_TOPIC = 'snuba-queries'\n\n# Runtime Config 
Options\nCONFIG_MEMOIZE_TIMEOUT = 10\n\n# Sentry Options\nSENTRY_DSN = None\n\n# Snuba Options\n\nSNAPSHOT_LOAD_PRODUCT = 'snuba'\n\nSNAPSHOT_CONTROL_TOPIC_INIT_TIMEOUT = 30\nBULK_CLICKHOUSE_BUFFER = 10000\n\n# Processor/Writer Options\nDEFAULT_BROKERS = ['localhost:9092']\nDEFAULT_DATASET_BROKERS = {}\n\nDEFAULT_MAX_BATCH_SIZE = 50000\nDEFAULT_MAX_BATCH_TIME_MS = 2 * 1000\nDEFAULT_QUEUED_MAX_MESSAGE_KBYTES = 10000\nDEFAULT_QUEUED_MIN_MESSAGES = 10000\nDISCARD_OLD_EVENTS = True\n\nDEFAULT_RETENTION_DAYS = 90\nRETENTION_OVERRIDES = {}\n\nMAX_PREWHERE_CONDITIONS = 1\n\nSTATS_IN_RESPONSE = False\n\nPAYLOAD_DATETIME_FORMAT = \"%Y-%m-%dT%H:%M:%S.%fZ\"\n\nREPLACER_MAX_BLOCK_SIZE = 512\nREPLACER_MAX_MEMORY_USAGE = 10 * (1024**3) # 10GB\n# TLL of Redis key that denotes whether a project had replacements\n# run recently. Useful for decidig whether or not to add FINAL clause\n# to queries.\nREPLACER_KEY_TTL = 12 * 60 * 60\nREPLACER_MAX_GROUP_IDS_TO_EXCLUDE = 256\n\nTURBO_SAMPLE_RATE = 0.1\n", "path": "snuba/settings_base.py"}, {"content": "from __future__ import absolute_import\n\nfrom redis.client import StrictRedis\nfrom redis.exceptions import BusyLoadingError, ConnectionError\nfrom rediscluster import StrictRedisCluster\n\nfrom snuba import settings\n\n\nclass RetryingStrictRedisCluster(StrictRedisCluster):\n \"\"\"\n Execute a command with cluster reinitialization retry logic.\n Should a cluster respond with a ConnectionError or BusyLoadingError the\n cluster nodes list will be reinitialized and the command will be executed\n again with the most up to date view of the world.\n \"\"\"\n\n def execute_command(self, *args, **kwargs):\n try:\n return super(self.__class__, self).execute_command(*args, **kwargs)\n except (\n ConnectionError,\n BusyLoadingError,\n KeyError # see: https://github.com/Grokzen/redis-py-cluster/issues/287\n ):\n self.connection_pool.nodes.reset()\n return super(self.__class__, self).execute_command(*args, **kwargs)\n\n\nif settings.USE_REDIS_CLUSTER:\n startup_nodes = settings.REDIS_CLUSTER_STARTUP_NODES\n if startup_nodes is None:\n startup_nodes = [{\n 'host': settings.REDIS_HOST,\n 'port': settings.REDIS_PORT,\n }]\n redis_client = RetryingStrictRedisCluster(\n startup_nodes=startup_nodes,\n socket_keepalive=True,\n password=settings.REDIS_PASSWORD,\n )\nelse:\n redis_client = StrictRedis(\n host=settings.REDIS_HOST,\n port=settings.REDIS_PORT,\n password=settings.REDIS_PASSWORD,\n db=settings.REDIS_DB,\n socket_keepalive=True,\n )\n", "path": "snuba/redis.py"}]} | 1,698 | 324 |
gh_patches_debug_3670 | rasdani/github-patches | git_diff | wright-group__WrightTools-753 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Kit leastsq should not except BaseException
https://github.com/wright-group/WrightTools/blob/f22920579f45632b4123661d9832ff0cc1b614c4/WrightTools/kit/_leastsq.py#L74
The exception caught should be limited to those known to be raised inside.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/kit/_leastsq.py`
Content:
```
1 """Least-square fitting tools."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 from ._utilities import Timer
8
9 import numpy as np
10
11 from scipy import optimize as scipy_optimize
12
13
14 # --- define --------------------------------------------------------------------------------------
15
16
17 __all__ = ["leastsqfitter"]
18
19
20 # --- functions -----------------------------------------------------------------------------------
21
22
23 def leastsqfitter(p0, datax, datay, function, verbose=False, cov_verbose=False):
24 """Conveniently call scipy.optmize.leastsq().
25
26 Returns fit parameters and their errors.
27
28 Parameters
29 ----------
30 p0 : list
31 list of guess parameters to pass to function
32 datax : array
33 array of independent values
34 datay : array
35 array of dependent values
36 function : function
37 function object to fit data to. Must be of the callable form function(p, x)
38 verbose : bool
39 toggles printing of fit time, fit params, and fit param errors
40 cov_verbose : bool
41 toggles printing of covarience matrix
42
43 Returns
44 -------
45 pfit_leastsq : list
46 list of fit parameters. s.t. the error between datay and function(p, datax) is minimized
47 perr_leastsq : list
48 list of fit parameter errors (1 std)
49 """
50 timer = Timer(verbose=False)
51 with timer:
52 # define error function
53 def errfunc(p, x, y):
54 return y - function(p, x)
55
56 # run optimization
57 pfit_leastsq, pcov, infodict, errmsg, success = scipy_optimize.leastsq(
58 errfunc, p0, args=(datax, datay), full_output=1, epsfcn=0.0001
59 )
60 # calculate covarience matrix
61 # original idea https://stackoverflow.com/a/21844726
62 if (len(datay) > len(p0)) and pcov is not None:
63 s_sq = (errfunc(pfit_leastsq, datax, datay) ** 2).sum() / (len(datay) - len(p0))
64 pcov = pcov * s_sq
65 if cov_verbose:
66 print(pcov)
67 else:
68 pcov = np.inf
69 # calculate and write errors
70 error = []
71 for i in range(len(pfit_leastsq)):
72 try:
73 error.append(np.absolute(pcov[i][i]) ** 0.5)
74 except BaseException:
75 error.append(0.00)
76 perr_leastsq = np.array(error)
77 # exit
78 if verbose:
79 print("fit params: ", pfit_leastsq)
80 print("fit params error: ", perr_leastsq)
81 print("fitting done in %f seconds" % timer.interval)
82 return pfit_leastsq, perr_leastsq
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/kit/_leastsq.py b/WrightTools/kit/_leastsq.py
--- a/WrightTools/kit/_leastsq.py
+++ b/WrightTools/kit/_leastsq.py
@@ -71,7 +71,7 @@
for i in range(len(pfit_leastsq)):
try:
error.append(np.absolute(pcov[i][i]) ** 0.5)
- except BaseException:
+ except IndexError:
error.append(0.00)
perr_leastsq = np.array(error)
# exit
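The one-word change matters because `BaseException` sits above `KeyboardInterrupt` and `SystemExit`, so the old clause could silently eat a Ctrl-C pressed during a long fit, whereas `IndexError` only absorbs the out-of-range lookup the loop actually expects. A toy illustration of the difference, using plain lists instead of the real covariance matrix:

```
# Toy demo: a narrow except clause handles the expected failure but still
# lets interrupts propagate; BaseException swallows them too.
def guarded(getter, catch):
    try:
        return getter()
    except catch:
        return 0.0


pcov = [[4.0, 0.0], [0.0, 9.0]]
print(guarded(lambda: abs(pcov[1][1]) ** 0.5, IndexError))   # 3.0
print(guarded(lambda: abs(pcov[5][5]) ** 0.5, IndexError))   # 0.0, handled


def simulated_ctrl_c():
    raise KeyboardInterrupt


print(guarded(simulated_ctrl_c, BaseException))   # 0.0: interrupt swallowed
try:
    guarded(simulated_ctrl_c, IndexError)
except KeyboardInterrupt:
    print("interrupt propagates with the narrow clause")
```

Whether `IndexError` alone covers every failure mode of `pcov[i][i]` (for instance when `pcov` has been replaced by `np.inf`) is a separate question; the sketch only shows why the bare `BaseException` was worth removing.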
| {"golden_diff": "diff --git a/WrightTools/kit/_leastsq.py b/WrightTools/kit/_leastsq.py\n--- a/WrightTools/kit/_leastsq.py\n+++ b/WrightTools/kit/_leastsq.py\n@@ -71,7 +71,7 @@\n for i in range(len(pfit_leastsq)):\n try:\n error.append(np.absolute(pcov[i][i]) ** 0.5)\n- except BaseException:\n+ except IndexError:\n error.append(0.00)\n perr_leastsq = np.array(error)\n # exit\n", "issue": "Kit leastsq should not except BaseException\nhttps://github.com/wright-group/WrightTools/blob/f22920579f45632b4123661d9832ff0cc1b614c4/WrightTools/kit/_leastsq.py#L74\r\n\r\nThe exception caught should be limited to those known to be raised inside.\n", "before_files": [{"content": "\"\"\"Least-square fitting tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nfrom ._utilities import Timer\n\nimport numpy as np\n\nfrom scipy import optimize as scipy_optimize\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"leastsqfitter\"]\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef leastsqfitter(p0, datax, datay, function, verbose=False, cov_verbose=False):\n \"\"\"Conveniently call scipy.optmize.leastsq().\n\n Returns fit parameters and their errors.\n\n Parameters\n ----------\n p0 : list\n list of guess parameters to pass to function\n datax : array\n array of independent values\n datay : array\n array of dependent values\n function : function\n function object to fit data to. Must be of the callable form function(p, x)\n verbose : bool\n toggles printing of fit time, fit params, and fit param errors\n cov_verbose : bool\n toggles printing of covarience matrix\n\n Returns\n -------\n pfit_leastsq : list\n list of fit parameters. s.t. 
the error between datay and function(p, datax) is minimized\n perr_leastsq : list\n list of fit parameter errors (1 std)\n \"\"\"\n timer = Timer(verbose=False)\n with timer:\n # define error function\n def errfunc(p, x, y):\n return y - function(p, x)\n\n # run optimization\n pfit_leastsq, pcov, infodict, errmsg, success = scipy_optimize.leastsq(\n errfunc, p0, args=(datax, datay), full_output=1, epsfcn=0.0001\n )\n # calculate covarience matrix\n # original idea https://stackoverflow.com/a/21844726\n if (len(datay) > len(p0)) and pcov is not None:\n s_sq = (errfunc(pfit_leastsq, datax, datay) ** 2).sum() / (len(datay) - len(p0))\n pcov = pcov * s_sq\n if cov_verbose:\n print(pcov)\n else:\n pcov = np.inf\n # calculate and write errors\n error = []\n for i in range(len(pfit_leastsq)):\n try:\n error.append(np.absolute(pcov[i][i]) ** 0.5)\n except BaseException:\n error.append(0.00)\n perr_leastsq = np.array(error)\n # exit\n if verbose:\n print(\"fit params: \", pfit_leastsq)\n print(\"fit params error: \", perr_leastsq)\n print(\"fitting done in %f seconds\" % timer.interval)\n return pfit_leastsq, perr_leastsq\n", "path": "WrightTools/kit/_leastsq.py"}], "after_files": [{"content": "\"\"\"Least-square fitting tools.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nfrom ._utilities import Timer\n\nimport numpy as np\n\nfrom scipy import optimize as scipy_optimize\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\n__all__ = [\"leastsqfitter\"]\n\n\n# --- functions -----------------------------------------------------------------------------------\n\n\ndef leastsqfitter(p0, datax, datay, function, verbose=False, cov_verbose=False):\n \"\"\"Conveniently call scipy.optmize.leastsq().\n\n Returns fit parameters and their errors.\n\n Parameters\n ----------\n p0 : list\n list of guess parameters to pass to function\n datax : array\n array of independent values\n datay : array\n array of dependent values\n function : function\n function object to fit data to. Must be of the callable form function(p, x)\n verbose : bool\n toggles printing of fit time, fit params, and fit param errors\n cov_verbose : bool\n toggles printing of covarience matrix\n\n Returns\n -------\n pfit_leastsq : list\n list of fit parameters. s.t. 
the error between datay and function(p, datax) is minimized\n perr_leastsq : list\n list of fit parameter errors (1 std)\n \"\"\"\n timer = Timer(verbose=False)\n with timer:\n # define error function\n def errfunc(p, x, y):\n return y - function(p, x)\n\n # run optimization\n pfit_leastsq, pcov, infodict, errmsg, success = scipy_optimize.leastsq(\n errfunc, p0, args=(datax, datay), full_output=1, epsfcn=0.0001\n )\n # calculate covarience matrix\n # original idea https://stackoverflow.com/a/21844726\n if (len(datay) > len(p0)) and pcov is not None:\n s_sq = (errfunc(pfit_leastsq, datax, datay) ** 2).sum() / (len(datay) - len(p0))\n pcov = pcov * s_sq\n if cov_verbose:\n print(pcov)\n else:\n pcov = np.inf\n # calculate and write errors\n error = []\n for i in range(len(pfit_leastsq)):\n try:\n error.append(np.absolute(pcov[i][i]) ** 0.5)\n except IndexError:\n error.append(0.00)\n perr_leastsq = np.array(error)\n # exit\n if verbose:\n print(\"fit params: \", pfit_leastsq)\n print(\"fit params error: \", perr_leastsq)\n print(\"fitting done in %f seconds\" % timer.interval)\n return pfit_leastsq, perr_leastsq\n", "path": "WrightTools/kit/_leastsq.py"}]} | 1,131 | 131 |
gh_patches_debug_20609 | rasdani/github-patches | git_diff | svthalia__concrexit-3484 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Merchandise page redesign
### Is your feature request related to a problem? Please describe.
The current merchandise page is a long list with lot's of text. Part of this problem is the board not hiding the merchandise items that are sold out, but I think some other layout than a list would help to improve the look.
### Describe the solution you'd like
View more images side by side, and make the images larger. The text is not very important for the merch at all, so it can be pushed to the background.
### Motivation
The board is getting new merch and would like the page to look better to get people interested in the merch.
### Describe alternatives you've considered
Keep the page as is, because people will buy merch anyway through whatsapp promotion etc.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/merchandise/urls.py`
Content:
```
1 """Defines the routes provided in this package."""
2 from django.urls import include, path
3
4 from . import views
5
6 #: the name of the application
7 app_name = "merchandise"
8
9 #: the urls provided by this package
10 urlpatterns = [
11 path(
12 "association/merchandise/",
13 include(
14 [
15 path("", views.index, name="index"),
16 ]
17 ),
18 )
19 ]
20
```
Path: `website/merchandise/views.py`
Content:
```
1 """The views for the merchandise package."""
2 from django.shortcuts import render
3
4 from merchandise.models import MerchandiseItem
5
6
7 def index(request):
8 """Render the index view.
9
10 :param request: the request object
11 :return: the response
12 """
13 items = MerchandiseItem.objects.all()
14
15 return render(request, "merchandise/index.html", {"items": items})
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/merchandise/urls.py b/website/merchandise/urls.py
--- a/website/merchandise/urls.py
+++ b/website/merchandise/urls.py
@@ -15,5 +15,6 @@
path("", views.index, name="index"),
]
),
- )
+ ),
+ path("association/merchandise/<int:id>/", views.product_page, name="product"),
]
diff --git a/website/merchandise/views.py b/website/merchandise/views.py
--- a/website/merchandise/views.py
+++ b/website/merchandise/views.py
@@ -1,4 +1,5 @@
"""The views for the merchandise package."""
+from django.http import Http404
from django.shortcuts import render
from merchandise.models import MerchandiseItem
@@ -13,3 +14,14 @@
items = MerchandiseItem.objects.all()
return render(request, "merchandise/index.html", {"items": items})
+
+
+def product_page(request, id):
+ try:
+ product = MerchandiseItem.objects.get(pk=id)
+ except MerchandiseItem.DoesNotExist:
+ raise Http404(
+ "This item may not exists, or is removed. Please check if the link is correct!"
+ )
+
+ return render(request, "merchandise/product_page.html", {"product": product})
| {"golden_diff": "diff --git a/website/merchandise/urls.py b/website/merchandise/urls.py\n--- a/website/merchandise/urls.py\n+++ b/website/merchandise/urls.py\n@@ -15,5 +15,6 @@\n path(\"\", views.index, name=\"index\"),\n ]\n ),\n- )\n+ ),\n+ path(\"association/merchandise/<int:id>/\", views.product_page, name=\"product\"),\n ]\ndiff --git a/website/merchandise/views.py b/website/merchandise/views.py\n--- a/website/merchandise/views.py\n+++ b/website/merchandise/views.py\n@@ -1,4 +1,5 @@\n \"\"\"The views for the merchandise package.\"\"\"\n+from django.http import Http404\n from django.shortcuts import render\n \n from merchandise.models import MerchandiseItem\n@@ -13,3 +14,14 @@\n items = MerchandiseItem.objects.all()\n \n return render(request, \"merchandise/index.html\", {\"items\": items})\n+\n+\n+def product_page(request, id):\n+ try:\n+ product = MerchandiseItem.objects.get(pk=id)\n+ except MerchandiseItem.DoesNotExist:\n+ raise Http404(\n+ \"This item may not exists, or is removed. Please check if the link is correct!\"\n+ )\n+\n+ return render(request, \"merchandise/product_page.html\", {\"product\": product})\n", "issue": "Merchandise page redesign\n### Is your feature request related to a problem? Please describe.\r\n\r\nThe current merchandise page is a long list with lot's of text. Part of this problem is the board not hiding the merchandise items that are sold out, but I think some other layout than a list would help to improve the look.\r\n\r\n### Describe the solution you'd like\r\n\r\nView more images side by side, and make the images larger. The text is not very important for the merch at all, so it can be pushed to the background.\r\n\r\n### Motivation\r\n\r\nThe board is getting new merch and would like the page to look better to get people interested in the merch.\r\n\r\n### Describe alternatives you've considered\r\n\r\nKeep the page as is, because people will buy merch anyway through whatsapp promotion etc.\r\n\n", "before_files": [{"content": "\"\"\"Defines the routes provided in this package.\"\"\"\nfrom django.urls import include, path\n\nfrom . import views\n\n#: the name of the application\napp_name = \"merchandise\"\n\n#: the urls provided by this package\nurlpatterns = [\n path(\n \"association/merchandise/\",\n include(\n [\n path(\"\", views.index, name=\"index\"),\n ]\n ),\n )\n]\n", "path": "website/merchandise/urls.py"}, {"content": "\"\"\"The views for the merchandise package.\"\"\"\nfrom django.shortcuts import render\n\nfrom merchandise.models import MerchandiseItem\n\n\ndef index(request):\n \"\"\"Render the index view.\n\n :param request: the request object\n :return: the response\n \"\"\"\n items = MerchandiseItem.objects.all()\n\n return render(request, \"merchandise/index.html\", {\"items\": items})\n", "path": "website/merchandise/views.py"}], "after_files": [{"content": "\"\"\"Defines the routes provided in this package.\"\"\"\nfrom django.urls import include, path\n\nfrom . 
import views\n\n#: the name of the application\napp_name = \"merchandise\"\n\n#: the urls provided by this package\nurlpatterns = [\n path(\n \"association/merchandise/\",\n include(\n [\n path(\"\", views.index, name=\"index\"),\n ]\n ),\n ),\n path(\"association/merchandise/<int:id>/\", views.product_page, name=\"product\"),\n]\n", "path": "website/merchandise/urls.py"}, {"content": "\"\"\"The views for the merchandise package.\"\"\"\nfrom django.http import Http404\nfrom django.shortcuts import render\n\nfrom merchandise.models import MerchandiseItem\n\n\ndef index(request):\n \"\"\"Render the index view.\n\n :param request: the request object\n :return: the response\n \"\"\"\n items = MerchandiseItem.objects.all()\n\n return render(request, \"merchandise/index.html\", {\"items\": items})\n\n\ndef product_page(request, id):\n try:\n product = MerchandiseItem.objects.get(pk=id)\n except MerchandiseItem.DoesNotExist:\n raise Http404(\n \"This item may not exists, or is removed. Please check if the link is correct!\"\n )\n\n return render(request, \"merchandise/product_page.html\", {\"product\": product})\n", "path": "website/merchandise/views.py"}]} | 660 | 315 |
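The golden diff in the record above introduces a `merchandise:product` detail route and a `product_page` view that raises `Http404` for unknown ids. A minimal sketch of exercising that route from the project's test suite — the primary key is hypothetical and assumes at least one `MerchandiseItem` exists in the test database:

```python
# Sketch only: runs inside the concrexit Django test environment.
from django.test import Client
from django.urls import reverse

client = Client()
url = reverse("merchandise:product", kwargs={"id": 1})  # hypothetical pk
print(url)  # -> /association/merchandise/1/

response = client.get(url)
# 200 for an existing item, 404 for a missing one (the view raises Http404).
print(response.status_code)
```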
gh_patches_debug_24019 | rasdani/github-patches | git_diff | nilearn__nilearn-2096 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scipy.misc.imread() replaced by scipy.imageio.imread() in v1.2
`scipy.misc.imread()` was deprecated in SciPy 1.0 & replaced in SciPy 1.2 by `scipy.imageio.imread()`
https://docs.scipy.org/doc/scipy-1.2.1/reference/generated/scipy.misc.imread.html
This is causing failures in CircleCI.
I will work on this once PR #2076 doctest problem has been addressed, since we need this issue to be resolved before it can be merged. Intended today.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/02_decoding/plot_haxby_stimuli.py`
Content:
```
1 """
2 Show stimuli of Haxby et al. dataset
3 ===============================================================================
4
5 In this script we plot an overview of the stimuli used in "Distributed
6 and Overlapping Representations of Faces and Objects in Ventral Temporal
7 Cortex" (Science 2001)
8 """
9
10 from scipy.misc import imread
11 import matplotlib.pyplot as plt
12
13 from nilearn import datasets
14 from nilearn.plotting import show
15
16 haxby_dataset = datasets.fetch_haxby(subjects=[], fetch_stimuli=True)
17 stimulus_information = haxby_dataset.stimuli
18
19 for stim_type in sorted(stimulus_information.keys()):
20 if stim_type == b'controls':
21 # skip control images, there are too many
22 continue
23
24 file_names = stimulus_information[stim_type]
25
26 plt.figure()
27 for i in range(48):
28 plt.subplot(6, 8, i + 1)
29 try:
30 plt.imshow(imread(file_names[i]), cmap=plt.cm.gray)
31 except:
32 # just go to the next one if the file is not present
33 pass
34 plt.axis("off")
35 plt.suptitle(stim_type)
36
37 show()
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/02_decoding/plot_haxby_stimuli.py b/examples/02_decoding/plot_haxby_stimuli.py
--- a/examples/02_decoding/plot_haxby_stimuli.py
+++ b/examples/02_decoding/plot_haxby_stimuli.py
@@ -7,7 +7,6 @@
Cortex" (Science 2001)
"""
-from scipy.misc import imread
import matplotlib.pyplot as plt
from nilearn import datasets
@@ -16,22 +15,19 @@
haxby_dataset = datasets.fetch_haxby(subjects=[], fetch_stimuli=True)
stimulus_information = haxby_dataset.stimuli
-for stim_type in sorted(stimulus_information.keys()):
- if stim_type == b'controls':
- # skip control images, there are too many
- continue
-
- file_names = stimulus_information[stim_type]
-
- plt.figure()
- for i in range(48):
- plt.subplot(6, 8, i + 1)
- try:
- plt.imshow(imread(file_names[i]), cmap=plt.cm.gray)
- except:
- # just go to the next one if the file is not present
- pass
- plt.axis("off")
- plt.suptitle(stim_type)
+for stim_type in stimulus_information:
+ # skip control images, there are too many
+ if stim_type != 'controls':
+
+ file_names = stimulus_information[stim_type]
+
+ fig, axes = plt.subplots(6, 8)
+ fig.suptitle(stim_type)
+
+ for img_path, ax in zip(file_names, axes.ravel()):
+ ax.imshow(plt.imread(img_path), cmap=plt.cm.gray)
+
+ for ax in axes.ravel():
+ ax.axis("off")
show()
| {"golden_diff": "diff --git a/examples/02_decoding/plot_haxby_stimuli.py b/examples/02_decoding/plot_haxby_stimuli.py\n--- a/examples/02_decoding/plot_haxby_stimuli.py\n+++ b/examples/02_decoding/plot_haxby_stimuli.py\n@@ -7,7 +7,6 @@\n Cortex\" (Science 2001)\n \"\"\"\n \n-from scipy.misc import imread\n import matplotlib.pyplot as plt\n \n from nilearn import datasets\n@@ -16,22 +15,19 @@\n haxby_dataset = datasets.fetch_haxby(subjects=[], fetch_stimuli=True)\n stimulus_information = haxby_dataset.stimuli\n \n-for stim_type in sorted(stimulus_information.keys()):\n- if stim_type == b'controls':\n- # skip control images, there are too many\n- continue\n-\n- file_names = stimulus_information[stim_type]\n-\n- plt.figure()\n- for i in range(48):\n- plt.subplot(6, 8, i + 1)\n- try:\n- plt.imshow(imread(file_names[i]), cmap=plt.cm.gray)\n- except:\n- # just go to the next one if the file is not present\n- pass\n- plt.axis(\"off\")\n- plt.suptitle(stim_type)\n+for stim_type in stimulus_information:\n+ # skip control images, there are too many\n+ if stim_type != 'controls':\n+\n+ file_names = stimulus_information[stim_type]\n+\n+ fig, axes = plt.subplots(6, 8)\n+ fig.suptitle(stim_type)\n+\n+ for img_path, ax in zip(file_names, axes.ravel()):\n+ ax.imshow(plt.imread(img_path), cmap=plt.cm.gray)\n+\n+ for ax in axes.ravel():\n+ ax.axis(\"off\")\n \n show()\n", "issue": "scipy.misc.imread() replaced by scipy.imageio.imread() in v1.2\n`scipy.misc.imread()` was deprecatd in SciPy 1.0 & replaced in SciPy 1.2 by `scipy.imageio.imread()`\r\n\r\nhttps://docs.scipy.org/doc/scipy-1.2.1/reference/generated/scipy.misc.imread.html\r\n\r\nThis is causing failures in CircleCI. \r\n\r\nI will work on this once PR #2076 doctest problem has been addressed, since we need this issue to be resolved before it can be merged. Intended today.\n", "before_files": [{"content": "\"\"\"\nShow stimuli of Haxby et al. dataset\n===============================================================================\n\nIn this script we plot an overview of the stimuli used in \"Distributed\nand Overlapping Representations of Faces and Objects in Ventral Temporal\nCortex\" (Science 2001)\n\"\"\"\n\nfrom scipy.misc import imread\nimport matplotlib.pyplot as plt\n\nfrom nilearn import datasets\nfrom nilearn.plotting import show\n\nhaxby_dataset = datasets.fetch_haxby(subjects=[], fetch_stimuli=True)\nstimulus_information = haxby_dataset.stimuli\n\nfor stim_type in sorted(stimulus_information.keys()):\n if stim_type == b'controls':\n # skip control images, there are too many\n continue\n\n file_names = stimulus_information[stim_type]\n\n plt.figure()\n for i in range(48):\n plt.subplot(6, 8, i + 1)\n try:\n plt.imshow(imread(file_names[i]), cmap=plt.cm.gray)\n except:\n # just go to the next one if the file is not present\n pass\n plt.axis(\"off\")\n plt.suptitle(stim_type)\n\nshow()\n", "path": "examples/02_decoding/plot_haxby_stimuli.py"}], "after_files": [{"content": "\"\"\"\nShow stimuli of Haxby et al. 
dataset\n===============================================================================\n\nIn this script we plot an overview of the stimuli used in \"Distributed\nand Overlapping Representations of Faces and Objects in Ventral Temporal\nCortex\" (Science 2001)\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfrom nilearn import datasets\nfrom nilearn.plotting import show\n\nhaxby_dataset = datasets.fetch_haxby(subjects=[], fetch_stimuli=True)\nstimulus_information = haxby_dataset.stimuli\n\nfor stim_type in stimulus_information:\n # skip control images, there are too many\n if stim_type != 'controls':\n\n file_names = stimulus_information[stim_type]\n\n fig, axes = plt.subplots(6, 8)\n fig.suptitle(stim_type)\n\n for img_path, ax in zip(file_names, axes.ravel()):\n ax.imshow(plt.imread(img_path), cmap=plt.cm.gray)\n\n for ax in axes.ravel():\n ax.axis(\"off\")\n\nshow()\n", "path": "examples/02_decoding/plot_haxby_stimuli.py"}]} | 708 | 413 |
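The patch in the record above drops the import of `scipy.misc.imread` (deprecated in SciPy 1.0 and removed in 1.2) and reads the stimulus images with `matplotlib.pyplot.imread` instead. A minimal standalone sketch of that substitution on a single file; the path is hypothetical:

```python
import matplotlib.pyplot as plt

# matplotlib.pyplot.imread returns the image as a NumPy array, much like
# the removed scipy.misc.imread did (JPEG support goes through Pillow).
img = plt.imread("stimuli/faces/face1.jpg")  # hypothetical stimulus path
plt.imshow(img, cmap=plt.cm.gray)
plt.axis("off")
plt.show()
```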
gh_patches_debug_4779 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-4960 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Inserting the `>_<` smiley
You get `>_<`
Solution 1: Modify the code here: https://github.com/zestedesavoir/zds-site/blob/4ae0431bbf199e318dd6f2b1301ac7b6adc40198/assets/js/editor.js#L132 Check that there is no bug/failure with ">" and "<".
Solution 2: Add the alias `X/` for this smiley and replace the code in the editor. https://github.com/zestedesavoir/zds-site/blob/56a5b2e8b524848efa2d328c0a46365a44c1d43e/zds/utils/templatetags/smileys_def.py#L26
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/utils/templatetags/smileys_def.py`
Content:
```
1 import os
2 from django.conf import settings
3
4 SMILEYS_BASE_PATH = os.path.join(settings.BASE_DIR, 'dist/smileys')
5 LICENSES_BASE_PATH = os.path.join(settings.BASE_DIR, 'dist/licenses')
6 SMILEYS_BASE_URL = os.path.join(settings.STATIC_URL, 'smileys')
7
8 SMILEYS_BASE = {
9 'smile.png': (':)', ':-)', ),
10 'heureux.png': (':D', ':-D', ),
11 'clin.png': (';)', ';-)', ),
12 'langue.png': (':p', ':P', ':-p', ':-P', ),
13 'rire.gif': (':lol:', ),
14 'unsure.gif': (':euh:', ),
15 'triste.png': (':(', ':-(', ),
16 'huh.png': (':o', ':-o', ':O', ':-O', ),
17 'mechant.png': (':colere2:', ),
18 'blink.gif': ('o_O', 'O_o', ),
19 'hihi.png': ('^^', ),
20 'siffle.png': (':-°', ':°', ),
21 'ange.png': (':ange:', ),
22 'angry.gif': (':colere:', ),
23 'diable.png': (':diable:', ),
24 'magicien.png': (':magicien:', ),
25 'ninja.gif': (':ninja:', ),
26 'pinch.png': ('>_<', ),
27 'pirate.png': (':pirate:', ),
28 'pleure.png': (":'(", ),
29 'rouge.png': (':honte:', ),
30 'soleil.png': (':soleil:', ),
31 'waw.png': (':waw:', ),
32 'zorro.png': (':zorro:', ),
33 'cthulhu.png': ('^(;,;)^', ),
34 }
35
36 smileys = {}
37 for image_file, symbols in SMILEYS_BASE.items():
38 for symbol in symbols:
39 smileys[symbol] = os.path.join(SMILEYS_BASE_URL, image_file)
40
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/utils/templatetags/smileys_def.py b/zds/utils/templatetags/smileys_def.py
--- a/zds/utils/templatetags/smileys_def.py
+++ b/zds/utils/templatetags/smileys_def.py
@@ -23,7 +23,7 @@
'diable.png': (':diable:', ),
'magicien.png': (':magicien:', ),
'ninja.gif': (':ninja:', ),
- 'pinch.png': ('>_<', ),
+ 'pinch.png': ('>_<', 'X/'),
'pirate.png': (':pirate:', ),
'pleure.png': (":'(", ),
'rouge.png': (':honte:', ),
| {"golden_diff": "diff --git a/zds/utils/templatetags/smileys_def.py b/zds/utils/templatetags/smileys_def.py\n--- a/zds/utils/templatetags/smileys_def.py\n+++ b/zds/utils/templatetags/smileys_def.py\n@@ -23,7 +23,7 @@\n 'diable.png': (':diable:', ),\n 'magicien.png': (':magicien:', ),\n 'ninja.gif': (':ninja:', ),\n- 'pinch.png': ('>_<', ),\n+ 'pinch.png': ('>_<', 'X/'),\n 'pirate.png': (':pirate:', ),\n 'pleure.png': (\":'(\", ),\n 'rouge.png': (':honte:', ),\n", "issue": " Insertion du smiley >_<\nOn obtient `>_<`\r\n\r\nSolution 1 : Modifier le code ici : https://github.com/zestedesavoir/zds-site/blob/4ae0431bbf199e318dd6f2b1301ac7b6adc40198/assets/js/editor.js#L132 V\u00e9rifier qu'il n'y a pas un bug/fail avec \">\" et \"<\".\r\n\r\nSolution 2 : On peut ajouter l'alias `X/` pour ce smiley et remplacer le code dans l'\u00e9diteur. https://github.com/zestedesavoir/zds-site/blob/56a5b2e8b524848efa2d328c0a46365a44c1d43e/zds/utils/templatetags/smileys_def.py#L26\n", "before_files": [{"content": "import os\nfrom django.conf import settings\n\nSMILEYS_BASE_PATH = os.path.join(settings.BASE_DIR, 'dist/smileys')\nLICENSES_BASE_PATH = os.path.join(settings.BASE_DIR, 'dist/licenses')\nSMILEYS_BASE_URL = os.path.join(settings.STATIC_URL, 'smileys')\n\nSMILEYS_BASE = {\n 'smile.png': (':)', ':-)', ),\n 'heureux.png': (':D', ':-D', ),\n 'clin.png': (';)', ';-)', ),\n 'langue.png': (':p', ':P', ':-p', ':-P', ),\n 'rire.gif': (':lol:', ),\n 'unsure.gif': (':euh:', ),\n 'triste.png': (':(', ':-(', ),\n 'huh.png': (':o', ':-o', ':O', ':-O', ),\n 'mechant.png': (':colere2:', ),\n 'blink.gif': ('o_O', 'O_o', ),\n 'hihi.png': ('^^', ),\n 'siffle.png': (':-\u00b0', ':\u00b0', ),\n 'ange.png': (':ange:', ),\n 'angry.gif': (':colere:', ),\n 'diable.png': (':diable:', ),\n 'magicien.png': (':magicien:', ),\n 'ninja.gif': (':ninja:', ),\n 'pinch.png': ('>_<', ),\n 'pirate.png': (':pirate:', ),\n 'pleure.png': (\":'(\", ),\n 'rouge.png': (':honte:', ),\n 'soleil.png': (':soleil:', ),\n 'waw.png': (':waw:', ),\n 'zorro.png': (':zorro:', ),\n 'cthulhu.png': ('^(;,;)^', ),\n}\n\nsmileys = {}\nfor image_file, symbols in SMILEYS_BASE.items():\n for symbol in symbols:\n smileys[symbol] = os.path.join(SMILEYS_BASE_URL, image_file)\n", "path": "zds/utils/templatetags/smileys_def.py"}], "after_files": [{"content": "import os\nfrom django.conf import settings\n\nSMILEYS_BASE_PATH = os.path.join(settings.BASE_DIR, 'dist/smileys')\nLICENSES_BASE_PATH = os.path.join(settings.BASE_DIR, 'dist/licenses')\nSMILEYS_BASE_URL = os.path.join(settings.STATIC_URL, 'smileys')\n\nSMILEYS_BASE = {\n 'smile.png': (':)', ':-)', ),\n 'heureux.png': (':D', ':-D', ),\n 'clin.png': (';)', ';-)', ),\n 'langue.png': (':p', ':P', ':-p', ':-P', ),\n 'rire.gif': (':lol:', ),\n 'unsure.gif': (':euh:', ),\n 'triste.png': (':(', ':-(', ),\n 'huh.png': (':o', ':-o', ':O', ':-O', ),\n 'mechant.png': (':colere2:', ),\n 'blink.gif': ('o_O', 'O_o', ),\n 'hihi.png': ('^^', ),\n 'siffle.png': (':-\u00b0', ':\u00b0', ),\n 'ange.png': (':ange:', ),\n 'angry.gif': (':colere:', ),\n 'diable.png': (':diable:', ),\n 'magicien.png': (':magicien:', ),\n 'ninja.gif': (':ninja:', ),\n 'pinch.png': ('>_<', 'X/'),\n 'pirate.png': (':pirate:', ),\n 'pleure.png': (\":'(\", ),\n 'rouge.png': (':honte:', ),\n 'soleil.png': (':soleil:', ),\n 'waw.png': (':waw:', ),\n 'zorro.png': (':zorro:', ),\n 'cthulhu.png': ('^(;,;)^', ),\n}\n\nsmileys = {}\nfor image_file, symbols in SMILEYS_BASE.items():\n for symbol in symbols:\n smileys[symbol] = os.path.join(SMILEYS_BASE_URL, 
image_file)\n", "path": "zds/utils/templatetags/smileys_def.py"}]} | 987 | 176 |
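The one-line change in the record above registers `X/` as a second trigger for `pinch.png`. A minimal sketch of how the alias propagates through the `smileys` lookup table built at the bottom of the module; the base URL is a hypothetical stand-in:

```python
import os

SMILEYS_BASE_URL = "/static/smileys"  # hypothetical value for this sketch
SMILEYS_BASE = {"pinch.png": (">_<", "X/")}

smileys = {}
for image_file, symbols in SMILEYS_BASE.items():
    for symbol in symbols:
        smileys[symbol] = os.path.join(SMILEYS_BASE_URL, image_file)

print(smileys[">_<"])  # /static/smileys/pinch.png
print(smileys["X/"])   # /static/smileys/pinch.png -- same image, new alias
```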
gh_patches_debug_25265 | rasdani/github-patches | git_diff | tinygrad__tinygrad-1562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tensor.__eq__() with two bool tensors raises error on Torch backend
This was introduced from #1493
To reproduce:
```
In [24]: (Tensor([1], dtype=dtypes.bool, device="TORCH") == Tensor([1], dtype=dtypes.bool, device="TORCH")).realize()
RuntimeError: Subtraction, the `-` operator, with a bool tensor is not supported. If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.
```
RuntimeError is from pytorch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tinygrad/runtime/ops_torch.py`
Content:
```
1 import torch
2 from typing import Dict, Callable, Optional
3 from tinygrad.ops import UnaryOps, BinaryOps, MovementOps, TernaryOps, Op, Interpreted
4 from tinygrad.helpers import getenv, dtypes, prod, DType
5 from tinygrad.runtime.ops_cpu import base_fxn_for_op, einsum_mulacc
6 from tinygrad.runtime.lib import RawBuffer
7
8 device = torch.device("cuda:0" if torch.cuda.is_available() else ("mps" if getenv("MPS", 0) else "cpu"))
9 type_map = {torch.float64: dtypes.float64, torch.float16: dtypes.float16, torch.float32: dtypes.float32, torch.int8: dtypes.int8, torch.int32: dtypes.int32, torch.int64: dtypes.int64, torch.uint8: dtypes.uint8, torch.bool: dtypes.bool}
10 inverse_type_map = {v:k for k,v in type_map.items()}
11
12 def as_strided(x, arg):
13 if any(i < 0 for i in arg[1]):
14 return torch.as_strided(x.contiguous(), arg[0], tuple(abs(i) for i in arg[1]),
15 arg[2] + sum((s-1)*a if a < 0 else 0 for (s,a) in zip(arg[0], arg[1]))).flip([i for i,a in enumerate(arg[1]) if a < 0])
16 return torch.as_strided(x.contiguous(), arg[0], arg[1], arg[2])
17
18 torch_fxn_for_op: Dict[Op, Callable] = {**base_fxn_for_op, **{
19 UnaryOps.NOOP: lambda x: x.contiguous(), UnaryOps.SQRT: lambda x: x.sqrt(), UnaryOps.EXP2: lambda x: x.exp2(), UnaryOps.LOG2: lambda x: x.log2(), UnaryOps.SIN: torch.sin,
20 UnaryOps.CAST: lambda x,y: (x.view if y[1] else x.type)(next(k for k,v in type_map.items() if v==y[0])),
21 BinaryOps.MAX: torch.maximum, BinaryOps.CMPLT: lambda x,y: (x<y).type(torch.promote_types(x.dtype, y.dtype)),
22 MovementOps.PAD: lambda x, padding: torch.nn.functional.pad(x, [item for sublist in padding[::-1] for item in sublist]),
23 TernaryOps.MULACC: einsum_mulacc(lambda s,a,b: torch.einsum(s, a.float(), b.float()).type(torch.promote_types(a.dtype, b.dtype)), lambda x: x.stride(), lambda x,s: x.expand(s)),
24 TernaryOps.WHERE: lambda x, y, z: torch.where(x != 0, y, z),
25 MovementOps.STRIDE: lambda x, arg: x[tuple(slice(None, None, abs(i)) for i in arg)].flip([i for i,a in enumerate(arg) if a < 0]),
26 MovementOps.EXPAND: lambda x, arg: x.expand(arg), MovementOps.PERMUTE: lambda x, arg: x.permute(arg),
27 MovementOps.AS_STRIDED: as_strided
28 }}
29
30 class RawTorchBuffer(RawBuffer):
31 def __init__(self, size:int, dtype:DType, buf:Optional[torch.Tensor]=None): super().__init__(size, dtype, buf if buf is not None else torch.empty([size], dtype=inverse_type_map[dtype]))
32 @classmethod
33 def fromCPU(cls, x):
34 buf = torch.from_numpy(x if all(s>=0 for s in x.strides) else x.copy()).requires_grad_(False).to(device)
35 return cls(prod(x.shape), type_map[buf.dtype], buf)
36 def toCPU(self): return self._buf.cpu().numpy()
37 TorchBuffer = Interpreted(RawTorchBuffer, torch_fxn_for_op, from_underlying=lambda x: RawTorchBuffer(prod(x.shape), type_map[x.dtype], x))
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tinygrad/runtime/ops_torch.py b/tinygrad/runtime/ops_torch.py
--- a/tinygrad/runtime/ops_torch.py
+++ b/tinygrad/runtime/ops_torch.py
@@ -18,7 +18,7 @@
torch_fxn_for_op: Dict[Op, Callable] = {**base_fxn_for_op, **{
UnaryOps.NOOP: lambda x: x.contiguous(), UnaryOps.SQRT: lambda x: x.sqrt(), UnaryOps.EXP2: lambda x: x.exp2(), UnaryOps.LOG2: lambda x: x.log2(), UnaryOps.SIN: torch.sin,
UnaryOps.CAST: lambda x,y: (x.view if y[1] else x.type)(next(k for k,v in type_map.items() if v==y[0])),
- BinaryOps.MAX: torch.maximum, BinaryOps.CMPLT: lambda x,y: (x<y).type(torch.promote_types(x.dtype, y.dtype)),
+ BinaryOps.MAX: torch.maximum, BinaryOps.CMPLT: lambda x,y: (x<y).type(torch.promote_types(x.dtype, y.dtype)), BinaryOps.SUB: lambda x,y: torch.logical_xor(x, y) if y.dtype is torch.bool else torch.sub(x, y),
MovementOps.PAD: lambda x, padding: torch.nn.functional.pad(x, [item for sublist in padding[::-1] for item in sublist]),
TernaryOps.MULACC: einsum_mulacc(lambda s,a,b: torch.einsum(s, a.float(), b.float()).type(torch.promote_types(a.dtype, b.dtype)), lambda x: x.stride(), lambda x,s: x.expand(s)),
TernaryOps.WHERE: lambda x, y, z: torch.where(x != 0, y, z),
| {"golden_diff": "diff --git a/tinygrad/runtime/ops_torch.py b/tinygrad/runtime/ops_torch.py\n--- a/tinygrad/runtime/ops_torch.py\n+++ b/tinygrad/runtime/ops_torch.py\n@@ -18,7 +18,7 @@\n torch_fxn_for_op: Dict[Op, Callable] = {**base_fxn_for_op, **{\n UnaryOps.NOOP: lambda x: x.contiguous(), UnaryOps.SQRT: lambda x: x.sqrt(), UnaryOps.EXP2: lambda x: x.exp2(), UnaryOps.LOG2: lambda x: x.log2(), UnaryOps.SIN: torch.sin,\n UnaryOps.CAST: lambda x,y: (x.view if y[1] else x.type)(next(k for k,v in type_map.items() if v==y[0])),\n- BinaryOps.MAX: torch.maximum, BinaryOps.CMPLT: lambda x,y: (x<y).type(torch.promote_types(x.dtype, y.dtype)),\n+ BinaryOps.MAX: torch.maximum, BinaryOps.CMPLT: lambda x,y: (x<y).type(torch.promote_types(x.dtype, y.dtype)), BinaryOps.SUB: lambda x,y: torch.logical_xor(x, y) if y.dtype is torch.bool else torch.sub(x, y),\n MovementOps.PAD: lambda x, padding: torch.nn.functional.pad(x, [item for sublist in padding[::-1] for item in sublist]),\n TernaryOps.MULACC: einsum_mulacc(lambda s,a,b: torch.einsum(s, a.float(), b.float()).type(torch.promote_types(a.dtype, b.dtype)), lambda x: x.stride(), lambda x,s: x.expand(s)),\n TernaryOps.WHERE: lambda x, y, z: torch.where(x != 0, y, z),\n", "issue": "Tensor.__eq__() with two bool tensors raises error on Torch backend\nThis was introduced from #1493\r\n\r\nTo reproduce:\r\n```\r\nIn [24]: (Tensor([1], dtype=dtypes.bool, device=\"TORCH\") == Tensor([1], dtype=dtypes.bool, device=\"TORCH\")).realize()\r\nRuntimeError: Subtraction, the `-` operator, with a bool tensor is not supported. If you are trying to invert a mask, use the `~` or `logical_not()` operator instead.\r\n```\r\nRuntimeError is from pytorch\r\n\r\n\n", "before_files": [{"content": "import torch\nfrom typing import Dict, Callable, Optional\nfrom tinygrad.ops import UnaryOps, BinaryOps, MovementOps, TernaryOps, Op, Interpreted\nfrom tinygrad.helpers import getenv, dtypes, prod, DType\nfrom tinygrad.runtime.ops_cpu import base_fxn_for_op, einsum_mulacc\nfrom tinygrad.runtime.lib import RawBuffer\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else (\"mps\" if getenv(\"MPS\", 0) else \"cpu\"))\ntype_map = {torch.float64: dtypes.float64, torch.float16: dtypes.float16, torch.float32: dtypes.float32, torch.int8: dtypes.int8, torch.int32: dtypes.int32, torch.int64: dtypes.int64, torch.uint8: dtypes.uint8, torch.bool: dtypes.bool}\ninverse_type_map = {v:k for k,v in type_map.items()}\n\ndef as_strided(x, arg):\n if any(i < 0 for i in arg[1]):\n return torch.as_strided(x.contiguous(), arg[0], tuple(abs(i) for i in arg[1]),\n arg[2] + sum((s-1)*a if a < 0 else 0 for (s,a) in zip(arg[0], arg[1]))).flip([i for i,a in enumerate(arg[1]) if a < 0])\n return torch.as_strided(x.contiguous(), arg[0], arg[1], arg[2])\n\ntorch_fxn_for_op: Dict[Op, Callable] = {**base_fxn_for_op, **{\n UnaryOps.NOOP: lambda x: x.contiguous(), UnaryOps.SQRT: lambda x: x.sqrt(), UnaryOps.EXP2: lambda x: x.exp2(), UnaryOps.LOG2: lambda x: x.log2(), UnaryOps.SIN: torch.sin,\n UnaryOps.CAST: lambda x,y: (x.view if y[1] else x.type)(next(k for k,v in type_map.items() if v==y[0])),\n BinaryOps.MAX: torch.maximum, BinaryOps.CMPLT: lambda x,y: (x<y).type(torch.promote_types(x.dtype, y.dtype)),\n MovementOps.PAD: lambda x, padding: torch.nn.functional.pad(x, [item for sublist in padding[::-1] for item in sublist]),\n TernaryOps.MULACC: einsum_mulacc(lambda s,a,b: torch.einsum(s, a.float(), b.float()).type(torch.promote_types(a.dtype, b.dtype)), lambda x: x.stride(), lambda x,s: 
x.expand(s)),\n TernaryOps.WHERE: lambda x, y, z: torch.where(x != 0, y, z),\n MovementOps.STRIDE: lambda x, arg: x[tuple(slice(None, None, abs(i)) for i in arg)].flip([i for i,a in enumerate(arg) if a < 0]),\n MovementOps.EXPAND: lambda x, arg: x.expand(arg), MovementOps.PERMUTE: lambda x, arg: x.permute(arg),\n MovementOps.AS_STRIDED: as_strided\n}}\n\nclass RawTorchBuffer(RawBuffer):\n def __init__(self, size:int, dtype:DType, buf:Optional[torch.Tensor]=None): super().__init__(size, dtype, buf if buf is not None else torch.empty([size], dtype=inverse_type_map[dtype]))\n @classmethod\n def fromCPU(cls, x):\n buf = torch.from_numpy(x if all(s>=0 for s in x.strides) else x.copy()).requires_grad_(False).to(device)\n return cls(prod(x.shape), type_map[buf.dtype], buf)\n def toCPU(self): return self._buf.cpu().numpy()\nTorchBuffer = Interpreted(RawTorchBuffer, torch_fxn_for_op, from_underlying=lambda x: RawTorchBuffer(prod(x.shape), type_map[x.dtype], x))\n", "path": "tinygrad/runtime/ops_torch.py"}], "after_files": [{"content": "import torch\nfrom typing import Dict, Callable, Optional\nfrom tinygrad.ops import UnaryOps, BinaryOps, MovementOps, TernaryOps, Op, Interpreted\nfrom tinygrad.helpers import getenv, dtypes, prod, DType\nfrom tinygrad.runtime.ops_cpu import base_fxn_for_op, einsum_mulacc\nfrom tinygrad.runtime.lib import RawBuffer\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else (\"mps\" if getenv(\"MPS\", 0) else \"cpu\"))\ntype_map = {torch.float64: dtypes.float64, torch.float16: dtypes.float16, torch.float32: dtypes.float32, torch.int8: dtypes.int8, torch.int32: dtypes.int32, torch.int64: dtypes.int64, torch.uint8: dtypes.uint8, torch.bool: dtypes.bool}\ninverse_type_map = {v:k for k,v in type_map.items()}\n\ndef as_strided(x, arg):\n if any(i < 0 for i in arg[1]):\n return torch.as_strided(x.contiguous(), arg[0], tuple(abs(i) for i in arg[1]),\n arg[2] + sum((s-1)*a if a < 0 else 0 for (s,a) in zip(arg[0], arg[1]))).flip([i for i,a in enumerate(arg[1]) if a < 0])\n return torch.as_strided(x.contiguous(), arg[0], arg[1], arg[2])\n\ntorch_fxn_for_op: Dict[Op, Callable] = {**base_fxn_for_op, **{\n UnaryOps.NOOP: lambda x: x.contiguous(), UnaryOps.SQRT: lambda x: x.sqrt(), UnaryOps.EXP2: lambda x: x.exp2(), UnaryOps.LOG2: lambda x: x.log2(), UnaryOps.SIN: torch.sin,\n UnaryOps.CAST: lambda x,y: (x.view if y[1] else x.type)(next(k for k,v in type_map.items() if v==y[0])),\n BinaryOps.MAX: torch.maximum, BinaryOps.CMPLT: lambda x,y: (x<y).type(torch.promote_types(x.dtype, y.dtype)), BinaryOps.SUB: lambda x,y: torch.logical_xor(x, y) if y.dtype is torch.bool else torch.sub(x, y),\n MovementOps.PAD: lambda x, padding: torch.nn.functional.pad(x, [item for sublist in padding[::-1] for item in sublist]),\n TernaryOps.MULACC: einsum_mulacc(lambda s,a,b: torch.einsum(s, a.float(), b.float()).type(torch.promote_types(a.dtype, b.dtype)), lambda x: x.stride(), lambda x,s: x.expand(s)),\n TernaryOps.WHERE: lambda x, y, z: torch.where(x != 0, y, z),\n MovementOps.STRIDE: lambda x, arg: x[tuple(slice(None, None, abs(i)) for i in arg)].flip([i for i,a in enumerate(arg) if a < 0]),\n MovementOps.EXPAND: lambda x, arg: x.expand(arg), MovementOps.PERMUTE: lambda x, arg: x.permute(arg),\n MovementOps.AS_STRIDED: as_strided\n}}\n\nclass RawTorchBuffer(RawBuffer):\n def __init__(self, size:int, dtype:DType, buf:Optional[torch.Tensor]=None): super().__init__(size, dtype, buf if buf is not None else torch.empty([size], dtype=inverse_type_map[dtype]))\n @classmethod\n def fromCPU(cls, 
x):\n buf = torch.from_numpy(x if all(s>=0 for s in x.strides) else x.copy()).requires_grad_(False).to(device)\n return cls(prod(x.shape), type_map[buf.dtype], buf)\n def toCPU(self): return self._buf.cpu().numpy()\nTorchBuffer = Interpreted(RawTorchBuffer, torch_fxn_for_op, from_underlying=lambda x: RawTorchBuffer(prod(x.shape), type_map[x.dtype], x))\n", "path": "tinygrad/runtime/ops_torch.py"}]} | 1,313 | 385 |
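The fix in the record above special-cases `BinaryOps.SUB` so that a bool right-hand operand is handled with `torch.logical_xor` instead of `torch.sub`, which PyTorch rejects for bool tensors. A minimal standalone sketch of the two paths:

```python
import torch

a = torch.tensor([True, False, True])
b = torch.tensor([True, True, False])

# Plain subtraction of bool tensors raises RuntimeError in PyTorch ...
try:
    torch.sub(a, b)
except RuntimeError as exc:
    print("torch.sub on bools:", type(exc).__name__)

# ... so the backend falls back to logical_xor, which gives the element-wise
# "differs" mask needed for equality comparisons on bool tensors.
print(torch.logical_xor(a, b))  # tensor([False,  True,  True])
```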
gh_patches_debug_32291 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-contrib-1253 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add more features for adding HTTP request / response headers to spans.
I already have https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1172 open for this, and I'll be breaking it into smaller pieces at @lzchen's request.
**Is your feature request related to a problem?**
Currently, you can only provide a list of full HTTP request / response header names to be added to the span.
There is also no capacity for header value redaction.
**Describe the solution you'd like**
It would be nice to be able to specify a regex or "all" to get all headers.
Header value redaction is also a must-have for us.
**Describe alternatives you've considered**
I considered doing this in my application, but it makes more sense to add it here.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from os import environ
16 from re import compile as re_compile
17 from re import search
18 from typing import Iterable, List
19 from urllib.parse import urlparse, urlunparse
20
21 from opentelemetry.semconv.trace import SpanAttributes
22
23 OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST = (
24 "OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST"
25 )
26 OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE = (
27 "OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE"
28 )
29
30 # List of recommended metrics attributes
31 _duration_attrs = {
32 SpanAttributes.HTTP_METHOD,
33 SpanAttributes.HTTP_HOST,
34 SpanAttributes.HTTP_SCHEME,
35 SpanAttributes.HTTP_STATUS_CODE,
36 SpanAttributes.HTTP_FLAVOR,
37 SpanAttributes.HTTP_SERVER_NAME,
38 SpanAttributes.NET_HOST_NAME,
39 SpanAttributes.NET_HOST_PORT,
40 }
41
42 _active_requests_count_attrs = {
43 SpanAttributes.HTTP_METHOD,
44 SpanAttributes.HTTP_HOST,
45 SpanAttributes.HTTP_SCHEME,
46 SpanAttributes.HTTP_FLAVOR,
47 SpanAttributes.HTTP_SERVER_NAME,
48 }
49
50
51 class ExcludeList:
52 """Class to exclude certain paths (given as a list of regexes) from tracing requests"""
53
54 def __init__(self, excluded_urls: Iterable[str]):
55 self._excluded_urls = excluded_urls
56 if self._excluded_urls:
57 self._regex = re_compile("|".join(excluded_urls))
58
59 def url_disabled(self, url: str) -> bool:
60 return bool(self._excluded_urls and search(self._regex, url))
61
62
63 _root = r"OTEL_PYTHON_{}"
64
65
66 def get_traced_request_attrs(instrumentation):
67 traced_request_attrs = environ.get(
68 _root.format(f"{instrumentation}_TRACED_REQUEST_ATTRS"), []
69 )
70
71 if traced_request_attrs:
72 traced_request_attrs = [
73 traced_request_attr.strip()
74 for traced_request_attr in traced_request_attrs.split(",")
75 ]
76
77 return traced_request_attrs
78
79
80 def get_excluded_urls(instrumentation: str) -> ExcludeList:
81 # Get instrumentation-specific excluded URLs. If not set, retrieve them
82 # from generic variable.
83 excluded_urls = environ.get(
84 _root.format(f"{instrumentation}_EXCLUDED_URLS"),
85 environ.get(_root.format("EXCLUDED_URLS"), ""),
86 )
87
88 return parse_excluded_urls(excluded_urls)
89
90
91 def parse_excluded_urls(excluded_urls: str) -> ExcludeList:
92 """
93 Small helper to put an arbitrary url list inside of ExcludeList
94 """
95 if excluded_urls:
96 excluded_url_list = [
97 excluded_url.strip() for excluded_url in excluded_urls.split(",")
98 ]
99 else:
100 excluded_url_list = []
101
102 return ExcludeList(excluded_url_list)
103
104
105 def remove_url_credentials(url: str) -> str:
106 """Given a string url, remove the username and password only if it is a valid url"""
107
108 try:
109 parsed = urlparse(url)
110 if all([parsed.scheme, parsed.netloc]): # checks for valid url
111 parsed_url = urlparse(url)
112 netloc = (
113 (":".join(((parsed_url.hostname or ""), str(parsed_url.port))))
114 if parsed_url.port
115 else (parsed_url.hostname or "")
116 )
117 return urlunparse(
118 (
119 parsed_url.scheme,
120 netloc,
121 parsed_url.path,
122 parsed_url.params,
123 parsed_url.query,
124 parsed_url.fragment,
125 )
126 )
127 except ValueError: # an unparsable url was passed
128 pass
129 return url
130
131
132 def normalise_request_header_name(header: str) -> str:
133 key = header.lower().replace("-", "_")
134 return f"http.request.header.{key}"
135
136
137 def normalise_response_header_name(header: str) -> str:
138 key = header.lower().replace("-", "_")
139 return f"http.response.header.{key}"
140
141
142 def get_custom_headers(env_var: str) -> List[str]:
143 custom_headers = environ.get(env_var, [])
144 if custom_headers:
145 custom_headers = [
146 custom_headers.strip()
147 for custom_headers in custom_headers.split(",")
148 ]
149 return custom_headers
150
151
152 def _parse_active_request_count_attrs(req_attrs):
153 active_requests_count_attrs = {
154 key: req_attrs[key]
155 for key in _active_requests_count_attrs.intersection(req_attrs.keys())
156 }
157 return active_requests_count_attrs
158
159
160 def _parse_duration_attrs(req_attrs):
161 duration_attrs = {
162 key: req_attrs[key]
163 for key in _duration_attrs.intersection(req_attrs.keys())
164 }
165 return duration_attrs
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py b/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py
--- a/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py
+++ b/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py
@@ -13,6 +13,7 @@
# limitations under the License.
from os import environ
+from re import IGNORECASE as RE_IGNORECASE
from re import compile as re_compile
from re import search
from typing import Iterable, List
@@ -20,6 +21,9 @@
from opentelemetry.semconv.trace import SpanAttributes
+OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS = (
+ "OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS"
+)
OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST = (
"OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST"
)
@@ -60,6 +64,22 @@
return bool(self._excluded_urls and search(self._regex, url))
+class SanitizeValue:
+ """Class to sanitize (remove sensitive data from) certain headers (given as a list of regexes)"""
+
+ def __init__(self, sanitized_fields: Iterable[str]):
+ self._sanitized_fields = sanitized_fields
+ if self._sanitized_fields:
+ self._regex = re_compile("|".join(sanitized_fields), RE_IGNORECASE)
+
+ def sanitize_header_value(self, header: str, value: str) -> str:
+ return (
+ "[REDACTED]"
+ if (self._sanitized_fields and search(self._regex, header))
+ else value
+ )
+
+
_root = r"OTEL_PYTHON_{}"
@@ -90,7 +110,7 @@
def parse_excluded_urls(excluded_urls: str) -> ExcludeList:
"""
- Small helper to put an arbitrary url list inside of ExcludeList
+ Small helper to put an arbitrary url list inside an ExcludeList
"""
if excluded_urls:
excluded_url_list = [
| {"golden_diff": "diff --git a/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py b/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py\n--- a/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py\n+++ b/util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py\n@@ -13,6 +13,7 @@\n # limitations under the License.\n \n from os import environ\n+from re import IGNORECASE as RE_IGNORECASE\n from re import compile as re_compile\n from re import search\n from typing import Iterable, List\n@@ -20,6 +21,9 @@\n \n from opentelemetry.semconv.trace import SpanAttributes\n \n+OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS = (\n+ \"OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS\"\n+)\n OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST = (\n \"OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST\"\n )\n@@ -60,6 +64,22 @@\n return bool(self._excluded_urls and search(self._regex, url))\n \n \n+class SanitizeValue:\n+ \"\"\"Class to sanitize (remove sensitive data from) certain headers (given as a list of regexes)\"\"\"\n+\n+ def __init__(self, sanitized_fields: Iterable[str]):\n+ self._sanitized_fields = sanitized_fields\n+ if self._sanitized_fields:\n+ self._regex = re_compile(\"|\".join(sanitized_fields), RE_IGNORECASE)\n+\n+ def sanitize_header_value(self, header: str, value: str) -> str:\n+ return (\n+ \"[REDACTED]\"\n+ if (self._sanitized_fields and search(self._regex, header))\n+ else value\n+ )\n+\n+\n _root = r\"OTEL_PYTHON_{}\"\n \n \n@@ -90,7 +110,7 @@\n \n def parse_excluded_urls(excluded_urls: str) -> ExcludeList:\n \"\"\"\n- Small helper to put an arbitrary url list inside of ExcludeList\n+ Small helper to put an arbitrary url list inside an ExcludeList\n \"\"\"\n if excluded_urls:\n excluded_url_list = [\n", "issue": "Add more features for adding HTTP request / response headers to spans.\nI already have https://github.com/open-telemetry/opentelemetry-python-contrib/pull/1172 open for this, and I'll be breaking it in to smaller pieces at @lzchen 's request.\r\n\r\n**Is your feature request related to a problem?**\r\nCurrently, you can only provide a list of full HTTP request / response header names to be added to the span.\r\n\r\nThere is also no capacity for header value redaction.\r\n\r\n**Describe the solution you'd like**\r\nIt would be nice to be able to specify a regex or \"all\" to get all headers.\r\n\r\nHeader value redaction is also a must-have for us.\r\n\r\n**Describe alternatives you've considered**\r\nI considered doing this in my application, but it makes more sense to add it here.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import environ\nfrom re import compile as re_compile\nfrom re import search\nfrom typing import Iterable, List\nfrom urllib.parse import urlparse, urlunparse\n\nfrom opentelemetry.semconv.trace import 
SpanAttributes\n\nOTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST = (\n \"OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST\"\n)\nOTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE = (\n \"OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE\"\n)\n\n# List of recommended metrics attributes\n_duration_attrs = {\n SpanAttributes.HTTP_METHOD,\n SpanAttributes.HTTP_HOST,\n SpanAttributes.HTTP_SCHEME,\n SpanAttributes.HTTP_STATUS_CODE,\n SpanAttributes.HTTP_FLAVOR,\n SpanAttributes.HTTP_SERVER_NAME,\n SpanAttributes.NET_HOST_NAME,\n SpanAttributes.NET_HOST_PORT,\n}\n\n_active_requests_count_attrs = {\n SpanAttributes.HTTP_METHOD,\n SpanAttributes.HTTP_HOST,\n SpanAttributes.HTTP_SCHEME,\n SpanAttributes.HTTP_FLAVOR,\n SpanAttributes.HTTP_SERVER_NAME,\n}\n\n\nclass ExcludeList:\n \"\"\"Class to exclude certain paths (given as a list of regexes) from tracing requests\"\"\"\n\n def __init__(self, excluded_urls: Iterable[str]):\n self._excluded_urls = excluded_urls\n if self._excluded_urls:\n self._regex = re_compile(\"|\".join(excluded_urls))\n\n def url_disabled(self, url: str) -> bool:\n return bool(self._excluded_urls and search(self._regex, url))\n\n\n_root = r\"OTEL_PYTHON_{}\"\n\n\ndef get_traced_request_attrs(instrumentation):\n traced_request_attrs = environ.get(\n _root.format(f\"{instrumentation}_TRACED_REQUEST_ATTRS\"), []\n )\n\n if traced_request_attrs:\n traced_request_attrs = [\n traced_request_attr.strip()\n for traced_request_attr in traced_request_attrs.split(\",\")\n ]\n\n return traced_request_attrs\n\n\ndef get_excluded_urls(instrumentation: str) -> ExcludeList:\n # Get instrumentation-specific excluded URLs. If not set, retrieve them\n # from generic variable.\n excluded_urls = environ.get(\n _root.format(f\"{instrumentation}_EXCLUDED_URLS\"),\n environ.get(_root.format(\"EXCLUDED_URLS\"), \"\"),\n )\n\n return parse_excluded_urls(excluded_urls)\n\n\ndef parse_excluded_urls(excluded_urls: str) -> ExcludeList:\n \"\"\"\n Small helper to put an arbitrary url list inside of ExcludeList\n \"\"\"\n if excluded_urls:\n excluded_url_list = [\n excluded_url.strip() for excluded_url in excluded_urls.split(\",\")\n ]\n else:\n excluded_url_list = []\n\n return ExcludeList(excluded_url_list)\n\n\ndef remove_url_credentials(url: str) -> str:\n \"\"\"Given a string url, remove the username and password only if it is a valid url\"\"\"\n\n try:\n parsed = urlparse(url)\n if all([parsed.scheme, parsed.netloc]): # checks for valid url\n parsed_url = urlparse(url)\n netloc = (\n (\":\".join(((parsed_url.hostname or \"\"), str(parsed_url.port))))\n if parsed_url.port\n else (parsed_url.hostname or \"\")\n )\n return urlunparse(\n (\n parsed_url.scheme,\n netloc,\n parsed_url.path,\n parsed_url.params,\n parsed_url.query,\n parsed_url.fragment,\n )\n )\n except ValueError: # an unparsable url was passed\n pass\n return url\n\n\ndef normalise_request_header_name(header: str) -> str:\n key = header.lower().replace(\"-\", \"_\")\n return f\"http.request.header.{key}\"\n\n\ndef normalise_response_header_name(header: str) -> str:\n key = header.lower().replace(\"-\", \"_\")\n return f\"http.response.header.{key}\"\n\n\ndef get_custom_headers(env_var: str) -> List[str]:\n custom_headers = environ.get(env_var, [])\n if custom_headers:\n custom_headers = [\n custom_headers.strip()\n for custom_headers in custom_headers.split(\",\")\n ]\n return custom_headers\n\n\ndef _parse_active_request_count_attrs(req_attrs):\n active_requests_count_attrs = {\n key: req_attrs[key]\n for 
key in _active_requests_count_attrs.intersection(req_attrs.keys())\n }\n return active_requests_count_attrs\n\n\ndef _parse_duration_attrs(req_attrs):\n duration_attrs = {\n key: req_attrs[key]\n for key in _duration_attrs.intersection(req_attrs.keys())\n }\n return duration_attrs\n", "path": "util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import environ\nfrom re import IGNORECASE as RE_IGNORECASE\nfrom re import compile as re_compile\nfrom re import search\nfrom typing import Iterable, List\nfrom urllib.parse import urlparse, urlunparse\n\nfrom opentelemetry.semconv.trace import SpanAttributes\n\nOTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS = (\n \"OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS\"\n)\nOTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST = (\n \"OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_REQUEST\"\n)\nOTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE = (\n \"OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SERVER_RESPONSE\"\n)\n\n# List of recommended metrics attributes\n_duration_attrs = {\n SpanAttributes.HTTP_METHOD,\n SpanAttributes.HTTP_HOST,\n SpanAttributes.HTTP_SCHEME,\n SpanAttributes.HTTP_STATUS_CODE,\n SpanAttributes.HTTP_FLAVOR,\n SpanAttributes.HTTP_SERVER_NAME,\n SpanAttributes.NET_HOST_NAME,\n SpanAttributes.NET_HOST_PORT,\n}\n\n_active_requests_count_attrs = {\n SpanAttributes.HTTP_METHOD,\n SpanAttributes.HTTP_HOST,\n SpanAttributes.HTTP_SCHEME,\n SpanAttributes.HTTP_FLAVOR,\n SpanAttributes.HTTP_SERVER_NAME,\n}\n\n\nclass ExcludeList:\n \"\"\"Class to exclude certain paths (given as a list of regexes) from tracing requests\"\"\"\n\n def __init__(self, excluded_urls: Iterable[str]):\n self._excluded_urls = excluded_urls\n if self._excluded_urls:\n self._regex = re_compile(\"|\".join(excluded_urls))\n\n def url_disabled(self, url: str) -> bool:\n return bool(self._excluded_urls and search(self._regex, url))\n\n\nclass SanitizeValue:\n \"\"\"Class to sanitize (remove sensitive data from) certain headers (given as a list of regexes)\"\"\"\n\n def __init__(self, sanitized_fields: Iterable[str]):\n self._sanitized_fields = sanitized_fields\n if self._sanitized_fields:\n self._regex = re_compile(\"|\".join(sanitized_fields), RE_IGNORECASE)\n\n def sanitize_header_value(self, header: str, value: str) -> str:\n return (\n \"[REDACTED]\"\n if (self._sanitized_fields and search(self._regex, header))\n else value\n )\n\n\n_root = r\"OTEL_PYTHON_{}\"\n\n\ndef get_traced_request_attrs(instrumentation):\n traced_request_attrs = environ.get(\n _root.format(f\"{instrumentation}_TRACED_REQUEST_ATTRS\"), []\n )\n\n if traced_request_attrs:\n traced_request_attrs = [\n traced_request_attr.strip()\n for traced_request_attr in traced_request_attrs.split(\",\")\n ]\n\n return traced_request_attrs\n\n\ndef get_excluded_urls(instrumentation: str) -> ExcludeList:\n # Get instrumentation-specific 
excluded URLs. If not set, retrieve them\n # from generic variable.\n excluded_urls = environ.get(\n _root.format(f\"{instrumentation}_EXCLUDED_URLS\"),\n environ.get(_root.format(\"EXCLUDED_URLS\"), \"\"),\n )\n\n return parse_excluded_urls(excluded_urls)\n\n\ndef parse_excluded_urls(excluded_urls: str) -> ExcludeList:\n \"\"\"\n Small helper to put an arbitrary url list inside an ExcludeList\n \"\"\"\n if excluded_urls:\n excluded_url_list = [\n excluded_url.strip() for excluded_url in excluded_urls.split(\",\")\n ]\n else:\n excluded_url_list = []\n\n return ExcludeList(excluded_url_list)\n\n\ndef remove_url_credentials(url: str) -> str:\n \"\"\"Given a string url, remove the username and password only if it is a valid url\"\"\"\n\n try:\n parsed = urlparse(url)\n if all([parsed.scheme, parsed.netloc]): # checks for valid url\n parsed_url = urlparse(url)\n netloc = (\n (\":\".join(((parsed_url.hostname or \"\"), str(parsed_url.port))))\n if parsed_url.port\n else (parsed_url.hostname or \"\")\n )\n return urlunparse(\n (\n parsed_url.scheme,\n netloc,\n parsed_url.path,\n parsed_url.params,\n parsed_url.query,\n parsed_url.fragment,\n )\n )\n except ValueError: # an unparsable url was passed\n pass\n return url\n\n\ndef normalise_request_header_name(header: str) -> str:\n key = header.lower().replace(\"-\", \"_\")\n return f\"http.request.header.{key}\"\n\n\ndef normalise_response_header_name(header: str) -> str:\n key = header.lower().replace(\"-\", \"_\")\n return f\"http.response.header.{key}\"\n\n\ndef get_custom_headers(env_var: str) -> List[str]:\n custom_headers = environ.get(env_var, [])\n if custom_headers:\n custom_headers = [\n custom_headers.strip()\n for custom_headers in custom_headers.split(\",\")\n ]\n return custom_headers\n\n\ndef _parse_active_request_count_attrs(req_attrs):\n active_requests_count_attrs = {\n key: req_attrs[key]\n for key in _active_requests_count_attrs.intersection(req_attrs.keys())\n }\n return active_requests_count_attrs\n\n\ndef _parse_duration_attrs(req_attrs):\n duration_attrs = {\n key: req_attrs[key]\n for key in _duration_attrs.intersection(req_attrs.keys())\n }\n return duration_attrs\n", "path": "util/opentelemetry-util-http/src/opentelemetry/util/http/__init__.py"}]} | 1,912 | 475 |
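The patch in the record above adds a `SanitizeValue` helper (plus the `OTEL_INSTRUMENTATION_HTTP_CAPTURE_HEADERS_SANITIZE_FIELDS` variable) that redacts the values of headers whose names match any configured regex, case-insensitively. A minimal sketch of the class as introduced by the diff, driven with hypothetical field patterns:

```python
from re import IGNORECASE, compile as re_compile, search

class SanitizeValue:
    """Redact values of headers whose names match any configured regex."""

    def __init__(self, sanitized_fields):
        self._sanitized_fields = sanitized_fields
        if self._sanitized_fields:
            self._regex = re_compile("|".join(sanitized_fields), IGNORECASE)

    def sanitize_header_value(self, header, value):
        return (
            "[REDACTED]"
            if (self._sanitized_fields and search(self._regex, header))
            else value
        )

# Hypothetical patterns: cookie, session and auth headers.
sv = SanitizeValue([".*session.*", "set-cookie", "authorization"])
print(sv.sanitize_header_value("Authorization", "Bearer abc123"))  # [REDACTED]
print(sv.sanitize_header_value("Content-Type", "text/html"))       # text/html
```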
gh_patches_debug_2683 | rasdani/github-patches | git_diff | huggingface__huggingface_hub-790 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support python=3.10
Python 3.10 has been out for a while but we seem to not test for it. What are the roadblocks for us to support 3.10 and maybe deprecate 3.6? (Many packages now support 3.8-3.10 and older versions are not supported anymore).
Ping @LysandreJik @osanseviero maybe?
--- END ISSUE ---
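For context, supporting a new interpreter and dropping an end-of-life one is mostly a packaging-metadata change plus a CI-matrix change. A minimal, illustrative sketch of the metadata side follows; the package name, version string, and chosen floor of 3.7 are assumptions for illustration, not the project's final decision:

```python
# Hypothetical excerpt of a setup() call after dropping 3.6 and adding 3.10.
from setuptools import setup

setup(
    name="example-package",
    version="0.0.1",
    python_requires=">=3.7.0",  # 3.6 is end-of-life, so stop advertising it
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",  # newly supported
    ],
)
```

For the claim to be meaningful, the CI test matrix would need the same additions and removals.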
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import find_packages, setup
2
3
4 def get_version() -> str:
5 rel_path = "src/huggingface_hub/__init__.py"
6 with open(rel_path, "r") as fp:
7 for line in fp.read().splitlines():
8 if line.startswith("__version__"):
9 delim = '"' if '"' in line else "'"
10 return line.split(delim)[1]
11 raise RuntimeError("Unable to find version string.")
12
13
14 install_requires = [
15 "filelock",
16 "requests",
17 "tqdm",
18 "pyyaml",
19 "typing-extensions>=3.7.4.3", # to be able to import TypeAlias
20 "importlib_metadata;python_version<'3.8'",
21 "packaging>=20.9",
22 ]
23
24 extras = {}
25
26 extras["torch"] = [
27 "torch",
28 ]
29
30 extras["tensorflow"] = [
31 "tensorflow",
32 "pydot",
33 "graphviz"
34 ]
35
36 extras["testing"] = [
37 "pytest",
38 "datasets",
39 "soundfile",
40 ]
41
42 extras["quality"] = [
43 "black~=22.0",
44 "isort>=5.5.4",
45 "flake8>=3.8.3",
46 ]
47
48 extras["all"] = extras["testing"] + extras["quality"]
49
50 extras["dev"] = extras["all"]
51
52
53 setup(
54 name="huggingface_hub",
55 version=get_version(),
56 author="Hugging Face, Inc.",
57 author_email="[email protected]",
58 description="Client library to download and publish models on the huggingface.co hub",
59 long_description=open("README.md", "r", encoding="utf-8").read(),
60 long_description_content_type="text/markdown",
61 keywords="model-hub machine-learning models natural-language-processing deep-learning pytorch pretrained-models",
62 license="Apache",
63 url="https://github.com/huggingface/huggingface_hub",
64 package_dir={"": "src"},
65 packages=find_packages("src"),
66 extras_require=extras,
67 entry_points={
68 "console_scripts": [
69 "huggingface-cli=huggingface_hub.commands.huggingface_cli:main"
70 ]
71 },
72 python_requires=">=3.6.0",
73 install_requires=install_requires,
74 classifiers=[
75 "Intended Audience :: Developers",
76 "Intended Audience :: Education",
77 "Intended Audience :: Science/Research",
78 "License :: OSI Approved :: Apache Software License",
79 "Operating System :: OS Independent",
80 "Programming Language :: Python :: 3",
81 "Topic :: Scientific/Engineering :: Artificial Intelligence",
82 ],
83 )
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -69,7 +69,7 @@
"huggingface-cli=huggingface_hub.commands.huggingface_cli:main"
]
},
- python_requires=">=3.6.0",
+ python_requires=">=3.7.0",
install_requires=install_requires,
classifiers=[
"Intended Audience :: Developers",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -69,7 +69,7 @@\n \"huggingface-cli=huggingface_hub.commands.huggingface_cli:main\"\n ]\n },\n- python_requires=\">=3.6.0\",\n+ python_requires=\">=3.7.0\",\n install_requires=install_requires,\n classifiers=[\n \"Intended Audience :: Developers\",\n", "issue": "Support python=3.10\nPython 3.10 has been out for a while but we seem to not test for it. What are the roadblocks for us to support 3.10 and maybe deprecate 3.6? (Many packages now support 3.8-3.10 and older versions are not supported anymore).\r\n\r\nPing @LysandreJik @osanseviero maybe?\n", "before_files": [{"content": "from setuptools import find_packages, setup\n\n\ndef get_version() -> str:\n rel_path = \"src/huggingface_hub/__init__.py\"\n with open(rel_path, \"r\") as fp:\n for line in fp.read().splitlines():\n if line.startswith(\"__version__\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n raise RuntimeError(\"Unable to find version string.\")\n\n\ninstall_requires = [\n \"filelock\",\n \"requests\",\n \"tqdm\",\n \"pyyaml\",\n \"typing-extensions>=3.7.4.3\", # to be able to import TypeAlias\n \"importlib_metadata;python_version<'3.8'\",\n \"packaging>=20.9\",\n]\n\nextras = {}\n\nextras[\"torch\"] = [\n \"torch\",\n]\n\nextras[\"tensorflow\"] = [\n \"tensorflow\",\n \"pydot\",\n \"graphviz\"\n]\n\nextras[\"testing\"] = [\n \"pytest\",\n \"datasets\",\n \"soundfile\",\n]\n\nextras[\"quality\"] = [\n \"black~=22.0\",\n \"isort>=5.5.4\",\n \"flake8>=3.8.3\",\n]\n\nextras[\"all\"] = extras[\"testing\"] + extras[\"quality\"]\n\nextras[\"dev\"] = extras[\"all\"]\n\n\nsetup(\n name=\"huggingface_hub\",\n version=get_version(),\n author=\"Hugging Face, Inc.\",\n author_email=\"[email protected]\",\n description=\"Client library to download and publish models on the huggingface.co hub\",\n long_description=open(\"README.md\", \"r\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n keywords=\"model-hub machine-learning models natural-language-processing deep-learning pytorch pretrained-models\",\n license=\"Apache\",\n url=\"https://github.com/huggingface/huggingface_hub\",\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"huggingface-cli=huggingface_hub.commands.huggingface_cli:main\"\n ]\n },\n python_requires=\">=3.6.0\",\n install_requires=install_requires,\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages, setup\n\n\ndef get_version() -> str:\n rel_path = \"src/huggingface_hub/__init__.py\"\n with open(rel_path, \"r\") as fp:\n for line in fp.read().splitlines():\n if line.startswith(\"__version__\"):\n delim = '\"' if '\"' in line else \"'\"\n return line.split(delim)[1]\n raise RuntimeError(\"Unable to find version string.\")\n\n\ninstall_requires = [\n \"filelock\",\n \"requests\",\n \"tqdm\",\n \"pyyaml\",\n \"typing-extensions>=3.7.4.3\", # to be able to import TypeAlias\n \"importlib_metadata;python_version<'3.8'\",\n \"packaging>=20.9\",\n]\n\nextras = {}\n\nextras[\"torch\"] = [\n 
\"torch\",\n]\n\nextras[\"tensorflow\"] = [\n \"tensorflow\",\n \"pydot\",\n \"graphviz\"\n]\n\nextras[\"testing\"] = [\n \"pytest\",\n \"datasets\",\n \"soundfile\",\n]\n\nextras[\"quality\"] = [\n \"black~=22.0\",\n \"isort>=5.5.4\",\n \"flake8>=3.8.3\",\n]\n\nextras[\"all\"] = extras[\"testing\"] + extras[\"quality\"]\n\nextras[\"dev\"] = extras[\"all\"]\n\n\nsetup(\n name=\"huggingface_hub\",\n version=get_version(),\n author=\"Hugging Face, Inc.\",\n author_email=\"[email protected]\",\n description=\"Client library to download and publish models on the huggingface.co hub\",\n long_description=open(\"README.md\", \"r\", encoding=\"utf-8\").read(),\n long_description_content_type=\"text/markdown\",\n keywords=\"model-hub machine-learning models natural-language-processing deep-learning pytorch pretrained-models\",\n license=\"Apache\",\n url=\"https://github.com/huggingface/huggingface_hub\",\n package_dir={\"\": \"src\"},\n packages=find_packages(\"src\"),\n extras_require=extras,\n entry_points={\n \"console_scripts\": [\n \"huggingface-cli=huggingface_hub.commands.huggingface_cli:main\"\n ]\n },\n python_requires=\">=3.7.0\",\n install_requires=install_requires,\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "setup.py"}]} | 1,063 | 96 |
gh_patches_debug_36073 | rasdani/github-patches | git_diff | streamlink__streamlink-5711 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.wasd: service gone
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
6.4.2
### Description
A few days ago, the service was [shut down](https://mts.ru/personal/novosti/2023-12-05/vstrechajte-polzovatelskuyu-videoplatformu-nuum). It now lives at [nuum.ru](https://nuum.ru).
Though we could easily replace the plugin, I'm not sure it's worth adding it to upstream, because it's still a beta version.
<details>
```diff
diff --git a/src/streamlink/plugins/wasd.py b/src/streamlink/plugins/wasd.py
index 7d61304e..656a16eb 100644
--- a/src/streamlink/plugins/wasd.py
+++ b/src/streamlink/plugins/wasd.py
@@ -16,7 +16,7 @@ log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
- r"https?://(?:www\.)?wasd\.tv/(?P<nickname>[^/]+)/?$",
+ r"https?://(?:www\.)?nuum\.ru/channel/(?P<nickname>[^/]+)/?$",
))
class WASD(Plugin):
_media_schema = validate.Schema({
@@ -53,11 +53,11 @@ class WASD(Plugin):
def _get_streams(self):
nickname = self.match.group("nickname")
- res = self.session.http.get(f"https://wasd.tv/api/channels/nicknames/{nickname}")
+ res = self.session.http.get(f"https://nuum.ru/api/channels/nicknames/{nickname}")
channel_id = self.session.http.json(res, schema=self._api_nicknames_schema)
res = self.session.http.get(
- "https://wasd.tv/api/v2/media-containers",
+ "https://nuum.ru/api/v2/media-containers",
params={
"media_container_status": "RUNNING",
"limit": "1",
```
</details>
### Debug log
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/wasd.py`
Content:
```
1 """
2 $description Russian live-streaming social platform.
3 $url wasd.tv
4 $type live
5 """
6
7 import logging
8 import re
9
10 from streamlink.plugin import Plugin, PluginError, pluginmatcher
11 from streamlink.plugin.api import validate
12 from streamlink.stream.hls import HLSStream
13
14
15 log = logging.getLogger(__name__)
16
17
18 @pluginmatcher(re.compile(
19 r"https?://(?:www\.)?wasd\.tv/(?P<nickname>[^/]+)/?$",
20 ))
21 class WASD(Plugin):
22 _media_schema = validate.Schema({
23 "user_id": int,
24 "media_container_online_status": str,
25 "media_container_status": str,
26 "media_container_streams": [{
27 "stream_media": [{
28 "media_id": int,
29 "media_meta": {
30 "media_url": validate.any(str, None),
31 "media_archive_url": validate.any(str, None),
32 },
33 "media_status": validate.any("STOPPED", "RUNNING"),
34 "media_type": "HLS",
35 }],
36 }],
37 })
38 _api_schema = validate.Schema({
39 "result":
40 validate.any(
41 _media_schema,
42 validate.all(list,
43 validate.get(0),
44 _media_schema),
45 [],
46 ),
47 }, validate.get("result"))
48 _api_nicknames_schema = validate.Schema({
49 "result": {
50 "channel_id": int,
51 },
52 }, validate.get("result"), validate.get("channel_id"))
53
54 def _get_streams(self):
55 nickname = self.match.group("nickname")
56 res = self.session.http.get(f"https://wasd.tv/api/channels/nicknames/{nickname}")
57 channel_id = self.session.http.json(res, schema=self._api_nicknames_schema)
58
59 res = self.session.http.get(
60 "https://wasd.tv/api/v2/media-containers",
61 params={
62 "media_container_status": "RUNNING",
63 "limit": "1",
64 "offset": "0",
65 "channel_id": channel_id,
66 "media_container_type": "SINGLE,COOP",
67 },
68 )
69
70 json_res = self.session.http.json(res, schema=self._api_schema)
71 log.trace("{0!r}".format(json_res))
72 if not json_res:
73 raise PluginError("No data returned from URL={0}".format(res.url))
74
75 for stream in json_res["media_container_streams"]:
76 log.debug("media_container_status: {0}, media_container_online_status: {1}".format(
77 json_res["media_container_status"], json_res["media_container_online_status"]))
78 for stream_media in stream["stream_media"]:
79 if stream_media["media_status"] == "STOPPED":
80 hls_url = stream_media["media_meta"]["media_archive_url"]
81 else:
82 hls_url = stream_media["media_meta"]["media_url"]
83
84 yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()
85
86
87 __plugin__ = WASD
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/wasd.py b/src/streamlink/plugins/wasd.py
deleted file mode 100644
--- a/src/streamlink/plugins/wasd.py
+++ /dev/null
@@ -1,87 +0,0 @@
-"""
-$description Russian live-streaming social platform.
-$url wasd.tv
-$type live
-"""
-
-import logging
-import re
-
-from streamlink.plugin import Plugin, PluginError, pluginmatcher
-from streamlink.plugin.api import validate
-from streamlink.stream.hls import HLSStream
-
-
-log = logging.getLogger(__name__)
-
-
-@pluginmatcher(re.compile(
- r"https?://(?:www\.)?wasd\.tv/(?P<nickname>[^/]+)/?$",
-))
-class WASD(Plugin):
- _media_schema = validate.Schema({
- "user_id": int,
- "media_container_online_status": str,
- "media_container_status": str,
- "media_container_streams": [{
- "stream_media": [{
- "media_id": int,
- "media_meta": {
- "media_url": validate.any(str, None),
- "media_archive_url": validate.any(str, None),
- },
- "media_status": validate.any("STOPPED", "RUNNING"),
- "media_type": "HLS",
- }],
- }],
- })
- _api_schema = validate.Schema({
- "result":
- validate.any(
- _media_schema,
- validate.all(list,
- validate.get(0),
- _media_schema),
- [],
- ),
- }, validate.get("result"))
- _api_nicknames_schema = validate.Schema({
- "result": {
- "channel_id": int,
- },
- }, validate.get("result"), validate.get("channel_id"))
-
- def _get_streams(self):
- nickname = self.match.group("nickname")
- res = self.session.http.get(f"https://wasd.tv/api/channels/nicknames/{nickname}")
- channel_id = self.session.http.json(res, schema=self._api_nicknames_schema)
-
- res = self.session.http.get(
- "https://wasd.tv/api/v2/media-containers",
- params={
- "media_container_status": "RUNNING",
- "limit": "1",
- "offset": "0",
- "channel_id": channel_id,
- "media_container_type": "SINGLE,COOP",
- },
- )
-
- json_res = self.session.http.json(res, schema=self._api_schema)
- log.trace("{0!r}".format(json_res))
- if not json_res:
- raise PluginError("No data returned from URL={0}".format(res.url))
-
- for stream in json_res["media_container_streams"]:
- log.debug("media_container_status: {0}, media_container_online_status: {1}".format(
- json_res["media_container_status"], json_res["media_container_online_status"]))
- for stream_media in stream["stream_media"]:
- if stream_media["media_status"] == "STOPPED":
- hls_url = stream_media["media_meta"]["media_archive_url"]
- else:
- hls_url = stream_media["media_meta"]["media_url"]
-
- yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()
-
-
-__plugin__ = WASD
| {"golden_diff": "diff --git a/src/streamlink/plugins/wasd.py b/src/streamlink/plugins/wasd.py\ndeleted file mode 100644\n--- a/src/streamlink/plugins/wasd.py\n+++ /dev/null\n@@ -1,87 +0,0 @@\n-\"\"\"\n-$description Russian live-streaming social platform.\n-$url wasd.tv\n-$type live\n-\"\"\"\n-\n-import logging\n-import re\n-\n-from streamlink.plugin import Plugin, PluginError, pluginmatcher\n-from streamlink.plugin.api import validate\n-from streamlink.stream.hls import HLSStream\n-\n-\n-log = logging.getLogger(__name__)\n-\n-\n-@pluginmatcher(re.compile(\n- r\"https?://(?:www\\.)?wasd\\.tv/(?P<nickname>[^/]+)/?$\",\n-))\n-class WASD(Plugin):\n- _media_schema = validate.Schema({\n- \"user_id\": int,\n- \"media_container_online_status\": str,\n- \"media_container_status\": str,\n- \"media_container_streams\": [{\n- \"stream_media\": [{\n- \"media_id\": int,\n- \"media_meta\": {\n- \"media_url\": validate.any(str, None),\n- \"media_archive_url\": validate.any(str, None),\n- },\n- \"media_status\": validate.any(\"STOPPED\", \"RUNNING\"),\n- \"media_type\": \"HLS\",\n- }],\n- }],\n- })\n- _api_schema = validate.Schema({\n- \"result\":\n- validate.any(\n- _media_schema,\n- validate.all(list,\n- validate.get(0),\n- _media_schema),\n- [],\n- ),\n- }, validate.get(\"result\"))\n- _api_nicknames_schema = validate.Schema({\n- \"result\": {\n- \"channel_id\": int,\n- },\n- }, validate.get(\"result\"), validate.get(\"channel_id\"))\n-\n- def _get_streams(self):\n- nickname = self.match.group(\"nickname\")\n- res = self.session.http.get(f\"https://wasd.tv/api/channels/nicknames/{nickname}\")\n- channel_id = self.session.http.json(res, schema=self._api_nicknames_schema)\n-\n- res = self.session.http.get(\n- \"https://wasd.tv/api/v2/media-containers\",\n- params={\n- \"media_container_status\": \"RUNNING\",\n- \"limit\": \"1\",\n- \"offset\": \"0\",\n- \"channel_id\": channel_id,\n- \"media_container_type\": \"SINGLE,COOP\",\n- },\n- )\n-\n- json_res = self.session.http.json(res, schema=self._api_schema)\n- log.trace(\"{0!r}\".format(json_res))\n- if not json_res:\n- raise PluginError(\"No data returned from URL={0}\".format(res.url))\n-\n- for stream in json_res[\"media_container_streams\"]:\n- log.debug(\"media_container_status: {0}, media_container_online_status: {1}\".format(\n- json_res[\"media_container_status\"], json_res[\"media_container_online_status\"]))\n- for stream_media in stream[\"stream_media\"]:\n- if stream_media[\"media_status\"] == \"STOPPED\":\n- hls_url = stream_media[\"media_meta\"][\"media_archive_url\"]\n- else:\n- hls_url = stream_media[\"media_meta\"][\"media_url\"]\n-\n- yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()\n-\n-\n-__plugin__ = WASD\n", "issue": "plugins.wasd: service gone\n### Checklist\r\n\r\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\n6.4.2\r\n\r\n### Description\r\n\r\nA few days ago, the service 
[gone](https://mts.ru/personal/novosti/2023-12-05/vstrechajte-polzovatelskuyu-videoplatformu-nuum). Now this [nuum.ru](https://nuum.ru).\r\n\r\nThough we could easily replace the plugin, but I'm not sure it's worth adding it to upstream, because it's a beta version.\r\n<details>\r\n\r\n```diff\r\ndiff --git a/src/streamlink/plugins/wasd.py b/src/streamlink/plugins/wasd.py\r\nindex 7d61304e..656a16eb 100644\r\n--- a/src/streamlink/plugins/wasd.py\r\n+++ b/src/streamlink/plugins/wasd.py\r\n@@ -16,7 +16,7 @@ log = logging.getLogger(__name__)\r\n \r\n \r\n @pluginmatcher(re.compile(\r\n- r\"https?://(?:www\\.)?wasd\\.tv/(?P<nickname>[^/]+)/?$\",\r\n+ r\"https?://(?:www\\.)?nuum\\.ru/channel/(?P<nickname>[^/]+)/?$\",\r\n ))\r\n class WASD(Plugin):\r\n _media_schema = validate.Schema({\r\n@@ -53,11 +53,11 @@ class WASD(Plugin):\r\n \r\n def _get_streams(self):\r\n nickname = self.match.group(\"nickname\")\r\n- res = self.session.http.get(f\"https://wasd.tv/api/channels/nicknames/{nickname}\")\r\n+ res = self.session.http.get(f\"https://nuum.ru/api/channels/nicknames/{nickname}\")\r\n channel_id = self.session.http.json(res, schema=self._api_nicknames_schema)\r\n \r\n res = self.session.http.get(\r\n- \"https://wasd.tv/api/v2/media-containers\",\r\n+ \"https://nuum.ru/api/v2/media-containers\",\r\n params={\r\n \"media_container_status\": \"RUNNING\",\r\n \"limit\": \"1\",\r\n```\r\n</details>\r\n\r\n### Debug log\r\n\r\n\n", "before_files": [{"content": "\"\"\"\n$description Russian live-streaming social platform.\n$url wasd.tv\n$type live\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, PluginError, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?wasd\\.tv/(?P<nickname>[^/]+)/?$\",\n))\nclass WASD(Plugin):\n _media_schema = validate.Schema({\n \"user_id\": int,\n \"media_container_online_status\": str,\n \"media_container_status\": str,\n \"media_container_streams\": [{\n \"stream_media\": [{\n \"media_id\": int,\n \"media_meta\": {\n \"media_url\": validate.any(str, None),\n \"media_archive_url\": validate.any(str, None),\n },\n \"media_status\": validate.any(\"STOPPED\", \"RUNNING\"),\n \"media_type\": \"HLS\",\n }],\n }],\n })\n _api_schema = validate.Schema({\n \"result\":\n validate.any(\n _media_schema,\n validate.all(list,\n validate.get(0),\n _media_schema),\n [],\n ),\n }, validate.get(\"result\"))\n _api_nicknames_schema = validate.Schema({\n \"result\": {\n \"channel_id\": int,\n },\n }, validate.get(\"result\"), validate.get(\"channel_id\"))\n\n def _get_streams(self):\n nickname = self.match.group(\"nickname\")\n res = self.session.http.get(f\"https://wasd.tv/api/channels/nicknames/{nickname}\")\n channel_id = self.session.http.json(res, schema=self._api_nicknames_schema)\n\n res = self.session.http.get(\n \"https://wasd.tv/api/v2/media-containers\",\n params={\n \"media_container_status\": \"RUNNING\",\n \"limit\": \"1\",\n \"offset\": \"0\",\n \"channel_id\": channel_id,\n \"media_container_type\": \"SINGLE,COOP\",\n },\n )\n\n json_res = self.session.http.json(res, schema=self._api_schema)\n log.trace(\"{0!r}\".format(json_res))\n if not json_res:\n raise PluginError(\"No data returned from URL={0}\".format(res.url))\n\n for stream in json_res[\"media_container_streams\"]:\n log.debug(\"media_container_status: {0}, media_container_online_status: {1}\".format(\n json_res[\"media_container_status\"], 
json_res[\"media_container_online_status\"]))\n for stream_media in stream[\"stream_media\"]:\n if stream_media[\"media_status\"] == \"STOPPED\":\n hls_url = stream_media[\"media_meta\"][\"media_archive_url\"]\n else:\n hls_url = stream_media[\"media_meta\"][\"media_url\"]\n\n yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()\n\n\n__plugin__ = WASD\n", "path": "src/streamlink/plugins/wasd.py"}], "after_files": [{"content": null, "path": "src/streamlink/plugins/wasd.py"}]} | 1,658 | 742 |
gh_patches_debug_27898 | rasdani/github-patches | git_diff | pypa__pip-4046 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip freeze --requirement doesn't accept inline comments
- Pip version: 8.1.2
- Python version: 2.7.11
- Operating System: Mac OS X
### Description:
pip freeze --requirement doesn't accept inline comments
### What I've run:
```
pip freeze -r requirements.txt
```
Output:
```
Invalid requirement: 'alembic==0.8.6 # MIT license'
Traceback (most recent call last):
File ".../site-packages/pip/req/req_install.py", line 78, in __init__
req = Requirement(req)
File ".../site-packages/pip/_vendor/packaging/requirements.py", line 96, in __init__
requirement_string[e.loc:e.loc + 8]))
InvalidRequirement: Invalid requirement, parse error at "'# MIT li'"
```
requirements.txt:
```
alembic==0.8.6 # MIT license
Babel==2.3.4 # BSD license
```
`pip install -r` works for this requirements.txt file.
Documentation states:
> Whitespace followed by a # causes the # and the remainder of the line to be treated as a comment.
--- END ISSUE ---
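The rule quoted above can be applied as a small pre-processing step before a requirement string is parsed. A self-contained sketch is below; the regex approximates the one pip keeps in `pip/req/req_file.py` (named `COMMENT_RE` there), and the helper name is illustrative:

```python
import re

# Requirements-file comment rule: whitespace followed by "#" starts a comment
# that runs to the end of the line (a line may also be a comment outright).
COMMENT_RE = re.compile(r'(^|\s+)#.*$')


def strip_inline_comment(line):
    """Return the requirement portion of a line, without any trailing comment."""
    return COMMENT_RE.sub('', line).strip()


for raw in ("alembic==0.8.6          # MIT license",
            "Babel==2.3.4            # BSD license",
            "# a full-line comment"):
    print(repr(strip_inline_comment(raw)))
# one line each: 'alembic==0.8.6', 'Babel==2.3.4', ''
```

Running each line through such a helper before handing it to the requirement parser avoids the `InvalidRequirement` error shown in the traceback.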
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pip/operations/freeze.py`
Content:
```
1 from __future__ import absolute_import
2
3 import logging
4 import re
5
6 import pip
7 from pip.req import InstallRequirement
8 from pip.utils import get_installed_distributions
9 from pip._vendor import pkg_resources
10 from pip._vendor.packaging.utils import canonicalize_name
11 from pip._vendor.pkg_resources import RequirementParseError
12
13
14 logger = logging.getLogger(__name__)
15
16
17 def freeze(
18 requirement=None,
19 find_links=None, local_only=None, user_only=None, skip_regex=None,
20 default_vcs=None,
21 isolated=False,
22 wheel_cache=None,
23 skip=()):
24 find_links = find_links or []
25 skip_match = None
26
27 if skip_regex:
28 skip_match = re.compile(skip_regex).search
29
30 dependency_links = []
31
32 for dist in pkg_resources.working_set:
33 if dist.has_metadata('dependency_links.txt'):
34 dependency_links.extend(
35 dist.get_metadata_lines('dependency_links.txt')
36 )
37 for link in find_links:
38 if '#egg=' in link:
39 dependency_links.append(link)
40 for link in find_links:
41 yield '-f %s' % link
42 installations = {}
43 for dist in get_installed_distributions(local_only=local_only,
44 skip=(),
45 user_only=user_only):
46 try:
47 req = pip.FrozenRequirement.from_dist(
48 dist,
49 dependency_links
50 )
51 except RequirementParseError:
52 logger.warning(
53 "Could not parse requirement: %s",
54 dist.project_name
55 )
56 continue
57 installations[req.name] = req
58
59 if requirement:
60 # the options that don't get turned into an InstallRequirement
61 # should only be emitted once, even if the same option is in multiple
62 # requirements files, so we need to keep track of what has been emitted
63 # so that we don't emit it again if it's seen again
64 emitted_options = set()
65 for req_file_path in requirement:
66 with open(req_file_path) as req_file:
67 for line in req_file:
68 if (not line.strip() or
69 line.strip().startswith('#') or
70 (skip_match and skip_match(line)) or
71 line.startswith((
72 '-r', '--requirement',
73 '-Z', '--always-unzip',
74 '-f', '--find-links',
75 '-i', '--index-url',
76 '--pre',
77 '--trusted-host',
78 '--process-dependency-links',
79 '--extra-index-url'))):
80 line = line.rstrip()
81 if line not in emitted_options:
82 emitted_options.add(line)
83 yield line
84 continue
85
86 if line.startswith('-e') or line.startswith('--editable'):
87 if line.startswith('-e'):
88 line = line[2:].strip()
89 else:
90 line = line[len('--editable'):].strip().lstrip('=')
91 line_req = InstallRequirement.from_editable(
92 line,
93 default_vcs=default_vcs,
94 isolated=isolated,
95 wheel_cache=wheel_cache,
96 )
97 else:
98 line_req = InstallRequirement.from_line(
99 line,
100 isolated=isolated,
101 wheel_cache=wheel_cache,
102 )
103
104 if not line_req.name:
105 logger.info(
106 "Skipping line in requirement file [%s] because "
107 "it's not clear what it would install: %s",
108 req_file_path, line.strip(),
109 )
110 logger.info(
111 " (add #egg=PackageName to the URL to avoid"
112 " this warning)"
113 )
114 elif line_req.name not in installations:
115 logger.warning(
116 "Requirement file [%s] contains %s, but that "
117 "package is not installed",
118 req_file_path, line.strip(),
119 )
120 else:
121 yield str(installations[line_req.name]).rstrip()
122 del installations[line_req.name]
123
124 yield(
125 '## The following requirements were added by '
126 'pip freeze:'
127 )
128 for installation in sorted(
129 installations.values(), key=lambda x: x.name.lower()):
130 if canonicalize_name(installation.name) not in skip:
131 yield str(installation).rstrip()
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pip/operations/freeze.py b/pip/operations/freeze.py
--- a/pip/operations/freeze.py
+++ b/pip/operations/freeze.py
@@ -5,6 +5,7 @@
import pip
from pip.req import InstallRequirement
+from pip.req.req_file import COMMENT_RE
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
@@ -96,7 +97,7 @@
)
else:
line_req = InstallRequirement.from_line(
- line,
+ COMMENT_RE.sub('', line).strip(),
isolated=isolated,
wheel_cache=wheel_cache,
)
@@ -115,7 +116,7 @@
logger.warning(
"Requirement file [%s] contains %s, but that "
"package is not installed",
- req_file_path, line.strip(),
+ req_file_path, COMMENT_RE.sub('', line).strip(),
)
else:
yield str(installations[line_req.name]).rstrip()
| {"golden_diff": "diff --git a/pip/operations/freeze.py b/pip/operations/freeze.py\n--- a/pip/operations/freeze.py\n+++ b/pip/operations/freeze.py\n@@ -5,6 +5,7 @@\n \n import pip\n from pip.req import InstallRequirement\n+from pip.req.req_file import COMMENT_RE\n from pip.utils import get_installed_distributions\n from pip._vendor import pkg_resources\n from pip._vendor.packaging.utils import canonicalize_name\n@@ -96,7 +97,7 @@\n )\n else:\n line_req = InstallRequirement.from_line(\n- line,\n+ COMMENT_RE.sub('', line).strip(),\n isolated=isolated,\n wheel_cache=wheel_cache,\n )\n@@ -115,7 +116,7 @@\n logger.warning(\n \"Requirement file [%s] contains %s, but that \"\n \"package is not installed\",\n- req_file_path, line.strip(),\n+ req_file_path, COMMENT_RE.sub('', line).strip(),\n )\n else:\n yield str(installations[line_req.name]).rstrip()\n", "issue": "pip freeze --requirement doesn't accept inline comments\n- Pip version: 8.1.2\n- Python version: 2.7.11\n- Operating System: Mac OS X\n### Description:\n\npip freeze --requirement doesn't accept inline comments\n### What I've run:\n\n```\npip freeze -r requirements.txt\n```\n\nOutput:\n\n```\nInvalid requirement: 'alembic==0.8.6 # MIT license'\nTraceback (most recent call last):\n File \".../site-packages/pip/req/req_install.py\", line 78, in __init__\n req = Requirement(req)\n File \".../site-packages/pip/_vendor/packaging/requirements.py\", line 96, in __init__\n requirement_string[e.loc:e.loc + 8]))\nInvalidRequirement: Invalid requirement, parse error at \"'# MIT li'\"\n```\n\nrequirements.txt:\n\n```\nalembic==0.8.6 # MIT license\nBabel==2.3.4 # BSD license\n```\n\n`pip install -r` works for this requirements.txt file.\n\nDocumentation states:\n\n> Whitespace followed by a # causes the # and the remainder of the line to be treated as a comment.\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport re\n\nimport pip\nfrom pip.req import InstallRequirement\nfrom pip.utils import get_installed_distributions\nfrom pip._vendor import pkg_resources\nfrom pip._vendor.packaging.utils import canonicalize_name\nfrom pip._vendor.pkg_resources import RequirementParseError\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef freeze(\n requirement=None,\n find_links=None, local_only=None, user_only=None, skip_regex=None,\n default_vcs=None,\n isolated=False,\n wheel_cache=None,\n skip=()):\n find_links = find_links or []\n skip_match = None\n\n if skip_regex:\n skip_match = re.compile(skip_regex).search\n\n dependency_links = []\n\n for dist in pkg_resources.working_set:\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt')\n )\n for link in find_links:\n if '#egg=' in link:\n dependency_links.append(link)\n for link in find_links:\n yield '-f %s' % link\n installations = {}\n for dist in get_installed_distributions(local_only=local_only,\n skip=(),\n user_only=user_only):\n try:\n req = pip.FrozenRequirement.from_dist(\n dist,\n dependency_links\n )\n except RequirementParseError:\n logger.warning(\n \"Could not parse requirement: %s\",\n dist.project_name\n )\n continue\n installations[req.name] = req\n\n if requirement:\n # the options that don't get turned into an InstallRequirement\n # should only be emitted once, even if the same option is in multiple\n # requirements files, so we need to keep track of what has been emitted\n # so that we don't emit it again if it's seen again\n emitted_options = set()\n for req_file_path in 
requirement:\n with open(req_file_path) as req_file:\n for line in req_file:\n if (not line.strip() or\n line.strip().startswith('#') or\n (skip_match and skip_match(line)) or\n line.startswith((\n '-r', '--requirement',\n '-Z', '--always-unzip',\n '-f', '--find-links',\n '-i', '--index-url',\n '--pre',\n '--trusted-host',\n '--process-dependency-links',\n '--extra-index-url'))):\n line = line.rstrip()\n if line not in emitted_options:\n emitted_options.add(line)\n yield line\n continue\n\n if line.startswith('-e') or line.startswith('--editable'):\n if line.startswith('-e'):\n line = line[2:].strip()\n else:\n line = line[len('--editable'):].strip().lstrip('=')\n line_req = InstallRequirement.from_editable(\n line,\n default_vcs=default_vcs,\n isolated=isolated,\n wheel_cache=wheel_cache,\n )\n else:\n line_req = InstallRequirement.from_line(\n line,\n isolated=isolated,\n wheel_cache=wheel_cache,\n )\n\n if not line_req.name:\n logger.info(\n \"Skipping line in requirement file [%s] because \"\n \"it's not clear what it would install: %s\",\n req_file_path, line.strip(),\n )\n logger.info(\n \" (add #egg=PackageName to the URL to avoid\"\n \" this warning)\"\n )\n elif line_req.name not in installations:\n logger.warning(\n \"Requirement file [%s] contains %s, but that \"\n \"package is not installed\",\n req_file_path, line.strip(),\n )\n else:\n yield str(installations[line_req.name]).rstrip()\n del installations[line_req.name]\n\n yield(\n '## The following requirements were added by '\n 'pip freeze:'\n )\n for installation in sorted(\n installations.values(), key=lambda x: x.name.lower()):\n if canonicalize_name(installation.name) not in skip:\n yield str(installation).rstrip()\n", "path": "pip/operations/freeze.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport re\n\nimport pip\nfrom pip.req import InstallRequirement\nfrom pip.req.req_file import COMMENT_RE\nfrom pip.utils import get_installed_distributions\nfrom pip._vendor import pkg_resources\nfrom pip._vendor.packaging.utils import canonicalize_name\nfrom pip._vendor.pkg_resources import RequirementParseError\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef freeze(\n requirement=None,\n find_links=None, local_only=None, user_only=None, skip_regex=None,\n default_vcs=None,\n isolated=False,\n wheel_cache=None,\n skip=()):\n find_links = find_links or []\n skip_match = None\n\n if skip_regex:\n skip_match = re.compile(skip_regex).search\n\n dependency_links = []\n\n for dist in pkg_resources.working_set:\n if dist.has_metadata('dependency_links.txt'):\n dependency_links.extend(\n dist.get_metadata_lines('dependency_links.txt')\n )\n for link in find_links:\n if '#egg=' in link:\n dependency_links.append(link)\n for link in find_links:\n yield '-f %s' % link\n installations = {}\n for dist in get_installed_distributions(local_only=local_only,\n skip=(),\n user_only=user_only):\n try:\n req = pip.FrozenRequirement.from_dist(\n dist,\n dependency_links\n )\n except RequirementParseError:\n logger.warning(\n \"Could not parse requirement: %s\",\n dist.project_name\n )\n continue\n installations[req.name] = req\n\n if requirement:\n # the options that don't get turned into an InstallRequirement\n # should only be emitted once, even if the same option is in multiple\n # requirements files, so we need to keep track of what has been emitted\n # so that we don't emit it again if it's seen again\n emitted_options = set()\n for req_file_path in requirement:\n with open(req_file_path) as 
req_file:\n for line in req_file:\n if (not line.strip() or\n line.strip().startswith('#') or\n (skip_match and skip_match(line)) or\n line.startswith((\n '-r', '--requirement',\n '-Z', '--always-unzip',\n '-f', '--find-links',\n '-i', '--index-url',\n '--pre',\n '--trusted-host',\n '--process-dependency-links',\n '--extra-index-url'))):\n line = line.rstrip()\n if line not in emitted_options:\n emitted_options.add(line)\n yield line\n continue\n\n if line.startswith('-e') or line.startswith('--editable'):\n if line.startswith('-e'):\n line = line[2:].strip()\n else:\n line = line[len('--editable'):].strip().lstrip('=')\n line_req = InstallRequirement.from_editable(\n line,\n default_vcs=default_vcs,\n isolated=isolated,\n wheel_cache=wheel_cache,\n )\n else:\n line_req = InstallRequirement.from_line(\n COMMENT_RE.sub('', line).strip(),\n isolated=isolated,\n wheel_cache=wheel_cache,\n )\n\n if not line_req.name:\n logger.info(\n \"Skipping line in requirement file [%s] because \"\n \"it's not clear what it would install: %s\",\n req_file_path, line.strip(),\n )\n logger.info(\n \" (add #egg=PackageName to the URL to avoid\"\n \" this warning)\"\n )\n elif line_req.name not in installations:\n logger.warning(\n \"Requirement file [%s] contains %s, but that \"\n \"package is not installed\",\n req_file_path, COMMENT_RE.sub('', line).strip(),\n )\n else:\n yield str(installations[line_req.name]).rstrip()\n del installations[line_req.name]\n\n yield(\n '## The following requirements were added by '\n 'pip freeze:'\n )\n for installation in sorted(\n installations.values(), key=lambda x: x.name.lower()):\n if canonicalize_name(installation.name) not in skip:\n yield str(installation).rstrip()\n", "path": "pip/operations/freeze.py"}]} | 1,662 | 234 |
gh_patches_debug_12666 | rasdani/github-patches | git_diff | openshift__openshift-ansible-3887 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[healthchecks] the package_version check always checks for master/node packages regardless of host group
#### Description
When running `playbooks/byo/openshift-preflight/check.yml`, the `package_version` check reports failures on hosts that can't access the `atomic-openshift-{master,node}` packages even when this is expected, e.g. on etcd or lb hosts.
##### Version
```
openshift-ansible-3.5.3-1-521-g3125e72
```
##### Steps To Reproduce
1. Have a cluster with `[etcd]`, `[lb]` and/or additional "auxiliary" host groups
2. Run the `playbooks/byo/openshift-preflight/check.yml` playbook
##### Expected Results
Hosts would not report a failure when they have access to the packages they need.
##### Observed Results
Hosts that don't have access to `atomic-openshift-{master,node}` packages in their configured repos are reported as failed, even when the hosts don't need these packages.
```
$ ansible-playbook playbooks/byo/openshift-preflight/check.yml
[...]
Failure summary:
1. Host: etcd2.example.com
Play: run OpenShift health checks
Task: openshift_health_check
Message: One or more checks failed
Details: {'package_availability': {'_ansible_parsed': True,
u'changed': False,
u'invocation': {u'module_args': {u'packages': []}}},
'package_update': {'_ansible_parsed': True,
u'changed': False,
u'invocation': {u'module_args': {u'packages': []}}},
'package_version': {'_ansible_parsed': True,
u'failed': True,
u'invocation': {u'module_args': {u'prefix': u'atomic-openshift',
u'version': u'v3.4'}},
u'msg': u'Not all of the required packages are available at requested version 3.4:\n atomic-openshift\n atomic-openshift-master\n atomic-openshift-node\nPlease check your subscriptions and enabled repositories.'}}
```
##### Additional Information
The inventory file used here has:
```
[OSEv3:children]
masters
nodes
etcd
lb
dns
# [...]
[etcd]
etcd2.example.com
# [...]
[lb]
lb.example.com
```
the hosts in *etcd*, *lb* and *dns* groups all fail the check.
--- END ISSUE ---
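For background, the pattern that addresses this kind of report is to let a check declare which inventory groups it applies to, so hosts in `[etcd]`, `[lb]`, or `[dns]` are simply skipped. A simplified, standalone sketch of the idea follows; the real project expresses this through its `OpenShiftCheck` classes, and the function below is only illustrative:

```python
# Minimal model of gating a package check on Ansible group membership.
def package_check_is_active(task_vars):
    """Run the package-version check only where the packages are required."""
    group_names = task_vars.get("group_names", [])
    # Only masters and nodes install the atomic-openshift master/node RPMs,
    # so other host groups should not fail for lacking access to them.
    return "masters" in group_names or "nodes" in group_names


print(package_check_is_active({"group_names": ["etcd"]}))   # False
print(package_check_is_active({"group_names": ["nodes"]}))  # True
```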
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `roles/openshift_health_checker/openshift_checks/package_version.py`
Content:
```
1 # pylint: disable=missing-docstring
2 from openshift_checks import OpenShiftCheck, get_var
3 from openshift_checks.mixins import NotContainerizedMixin
4
5
6 class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
7 """Check that available RPM packages match the required versions."""
8
9 name = "package_version"
10 tags = ["preflight"]
11
12 def run(self, tmp, task_vars):
13 rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
14 openshift_release = get_var(task_vars, "openshift_release")
15
16 args = {
17 "prefix": rpm_prefix,
18 "version": openshift_release,
19 }
20 return self.execute_module("aos_version", args, tmp, task_vars)
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -9,6 +9,13 @@
name = "package_version"
tags = ["preflight"]
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have package requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ master_or_node = 'masters' in group_names or 'nodes' in group_names
+ return super(PackageVersion, cls).is_active(task_vars) and master_or_node
+
def run(self, tmp, task_vars):
rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
openshift_release = get_var(task_vars, "openshift_release")
| {"golden_diff": "diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py\n--- a/roles/openshift_health_checker/openshift_checks/package_version.py\n+++ b/roles/openshift_health_checker/openshift_checks/package_version.py\n@@ -9,6 +9,13 @@\n name = \"package_version\"\n tags = [\"preflight\"]\n \n+ @classmethod\n+ def is_active(cls, task_vars):\n+ \"\"\"Skip hosts that do not have package requirements.\"\"\"\n+ group_names = get_var(task_vars, \"group_names\", default=[])\n+ master_or_node = 'masters' in group_names or 'nodes' in group_names\n+ return super(PackageVersion, cls).is_active(task_vars) and master_or_node\n+\n def run(self, tmp, task_vars):\n rpm_prefix = get_var(task_vars, \"openshift\", \"common\", \"service_type\")\n openshift_release = get_var(task_vars, \"openshift_release\")\n", "issue": "[healthchecks] the package_version check always checks for master/node packages regardless of host group\n#### Description\r\n\r\nWhen running `playbooks/byo/openshift-preflight/check.yml`, the `package_version` check reports failures on hosts that can't access the `atomic-openshift-{master,node}` packages even when this is expected, e.g. on etcd or lb hosts.\r\n\r\n\r\n##### Version\r\n\r\n```\r\nopenshift-ansible-3.5.3-1-521-g3125e72\r\n```\r\n\r\n##### Steps To Reproduce\r\n1. Have a cluster with `[etcd]`, `[lb]` and/or additional \"auxiliary\" host groups\r\n2. Run the `playbooks/byo/openshift-preflight/check.yml` playbook\r\n\r\n\r\n##### Expected Results\r\nHosts would not report a failure when they have access to the packages they need.\r\n\r\n##### Observed Results\r\nHosts that don't have access to `atomic-openshift-{master,node}` packages in their configured repos are reported as failed, even when the hosts don't need these packages.\r\nDescribe what is actually happening.\r\n\r\n```\r\n$ ansible-playbook playbooks/byo/openshift-preflight/check.yml\r\n[...]\r\nFailure summary:\r\n\r\n 1. 
Host: etcd2.example.com\r\n Play: run OpenShift health checks\r\n Task: openshift_health_check\r\n Message: One or more checks failed\r\n Details: {'package_availability': {'_ansible_parsed': True,\r\n u'changed': False,\r\n u'invocation': {u'module_args': {u'packages': []}}},\r\n 'package_update': {'_ansible_parsed': True,\r\n u'changed': False,\r\n u'invocation': {u'module_args': {u'packages': []}}},\r\n 'package_version': {'_ansible_parsed': True,\r\n u'failed': True,\r\n u'invocation': {u'module_args': {u'prefix': u'atomic-openshift',\r\n u'version': u'v3.4'}},\r\n u'msg': u'Not all of the required packages are available at requested version 3.4:\\n atomic-openshift\\n atomic-openshift-master\\n atomic-openshift-node\\nPlease check your subscriptions and enabled repositories.'}}\r\n```\r\n\r\n##### Additional Information\r\n\r\nThe inventory file used here has:\r\n\r\n```\r\n[OSEv3:children]\r\nmasters\r\nnodes\r\netcd\r\nlb\r\ndns\r\n\r\n# [...]\r\n\r\n[etcd]\r\netcd2.example.com\r\n# [...]\r\n\r\n[lb]\r\nlb.example.com\r\n```\r\n\r\nthe hosts in *etcd*, *lb* and *dns* groups all fail the check.\r\n\r\n\r\n\n", "before_files": [{"content": "# pylint: disable=missing-docstring\nfrom openshift_checks import OpenShiftCheck, get_var\nfrom openshift_checks.mixins import NotContainerizedMixin\n\n\nclass PackageVersion(NotContainerizedMixin, OpenShiftCheck):\n \"\"\"Check that available RPM packages match the required versions.\"\"\"\n\n name = \"package_version\"\n tags = [\"preflight\"]\n\n def run(self, tmp, task_vars):\n rpm_prefix = get_var(task_vars, \"openshift\", \"common\", \"service_type\")\n openshift_release = get_var(task_vars, \"openshift_release\")\n\n args = {\n \"prefix\": rpm_prefix,\n \"version\": openshift_release,\n }\n return self.execute_module(\"aos_version\", args, tmp, task_vars)\n", "path": "roles/openshift_health_checker/openshift_checks/package_version.py"}], "after_files": [{"content": "# pylint: disable=missing-docstring\nfrom openshift_checks import OpenShiftCheck, get_var\nfrom openshift_checks.mixins import NotContainerizedMixin\n\n\nclass PackageVersion(NotContainerizedMixin, OpenShiftCheck):\n \"\"\"Check that available RPM packages match the required versions.\"\"\"\n\n name = \"package_version\"\n tags = [\"preflight\"]\n\n @classmethod\n def is_active(cls, task_vars):\n \"\"\"Skip hosts that do not have package requirements.\"\"\"\n group_names = get_var(task_vars, \"group_names\", default=[])\n master_or_node = 'masters' in group_names or 'nodes' in group_names\n return super(PackageVersion, cls).is_active(task_vars) and master_or_node\n\n def run(self, tmp, task_vars):\n rpm_prefix = get_var(task_vars, \"openshift\", \"common\", \"service_type\")\n openshift_release = get_var(task_vars, \"openshift_release\")\n\n args = {\n \"prefix\": rpm_prefix,\n \"version\": openshift_release,\n }\n return self.execute_module(\"aos_version\", args, tmp, task_vars)\n", "path": "roles/openshift_health_checker/openshift_checks/package_version.py"}]} | 1,031 | 224 |
gh_patches_debug_20166 | rasdani/github-patches | git_diff | marshmallow-code__webargs-680 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
typing issue with __version_info__ += __parsed_version__.pre
mypy issue:
```
__version_info__ += __parsed_version__.pre
```
```
src/webargs/__init__.py:14: error: Unsupported operand types for + ("Tuple[int, ...]" and "Tuple[str, int]")
```
Not sure what the problem is. I'm tempted to just add a `# type: ignore`. Any better idea, anyone?
--- END ISSUE ---
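The complaint can be reproduced in isolation: the checker sees `release` as a homogeneous `Tuple[int, ...]` and `pre` as a `Tuple[str, int]`, so concatenating them yields a tuple whose element type no longer matches the inferred variable type. A minimal illustration, using the pragmatic suppression the reporter mentions, is below; the variable names stand in for `packaging.version.Version` attributes and are assumptions, not webargs code:

```python
from typing import Optional, Tuple

# Stand-ins for packaging.version.Version(...).release and .pre
release: Tuple[int, ...] = (8, 0, 1)
pre: Optional[Tuple[str, int]] = ("rc", 1)

version_info = release  # inferred as Tuple[int, ...]
if pre is not None:
    # At runtime this is fine and yields (8, 0, 1, 'rc', 1); the suppression
    # acknowledges that the tuple becomes heterogeneous at this point.
    version_info = version_info + pre  # type: ignore
print(version_info)
```

An alternative to the blanket suppression is to annotate the variable explicitly with the union of both tuple shapes, which is roughly what the repository's eventual change does.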
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/webargs/__init__.py`
Content:
```
1 from packaging.version import Version
2 from marshmallow.utils import missing
3
4 # Make marshmallow's validation functions importable from webargs
5 from marshmallow import validate
6
7 from webargs.core import ValidationError
8 from webargs import fields
9
10 __version__ = "8.0.1"
11 __parsed_version__ = Version(__version__)
12 __version_info__ = __parsed_version__.release
13 if __parsed_version__.pre:
14 __version_info__ += __parsed_version__.pre
15 __all__ = ("ValidationError", "fields", "missing", "validate")
16
```
Path: `setup.py`
Content:
```
1 import re
2 from setuptools import setup, find_packages
3
4 FRAMEWORKS = [
5 "Flask>=0.12.5",
6 "Django>=2.2.0",
7 "bottle>=0.12.13",
8 "tornado>=4.5.2",
9 "pyramid>=1.9.1",
10 "falcon>=2.0.0",
11 "aiohttp>=3.0.8",
12 ]
13 EXTRAS_REQUIRE = {
14 "frameworks": FRAMEWORKS,
15 "tests": [
16 "pytest",
17 "webtest==3.0.0",
18 "webtest-aiohttp==2.0.0",
19 "pytest-aiohttp>=0.3.0",
20 ]
21 + FRAMEWORKS,
22 "lint": [
23 "mypy==0.910",
24 "flake8==4.0.1",
25 "flake8-bugbear==21.11.29",
26 "pre-commit~=2.4",
27 ],
28 "docs": [
29 "Sphinx==4.3.2",
30 "sphinx-issues==2.0.0",
31 "furo==2022.1.2",
32 ]
33 + FRAMEWORKS,
34 }
35 EXTRAS_REQUIRE["dev"] = EXTRAS_REQUIRE["tests"] + EXTRAS_REQUIRE["lint"] + ["tox"]
36
37
38 def find_version(fname):
39 """Attempts to find the version number in the file names fname.
40 Raises RuntimeError if not found.
41 """
42 version = ""
43 with open(fname) as fp:
44 reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
45 for line in fp:
46 m = reg.match(line)
47 if m:
48 version = m.group(1)
49 break
50 if not version:
51 raise RuntimeError("Cannot find version information")
52 return version
53
54
55 def read(fname):
56 with open(fname) as fp:
57 content = fp.read()
58 return content
59
60
61 setup(
62 name="webargs",
63 version=find_version("src/webargs/__init__.py"),
64 description=(
65 "Declarative parsing and validation of HTTP request objects, "
66 "with built-in support for popular web frameworks, including "
67 "Flask, Django, Bottle, Tornado, Pyramid, Falcon, and aiohttp."
68 ),
69 long_description=read("README.rst"),
70 author="Steven Loria",
71 author_email="[email protected]",
72 url="https://github.com/marshmallow-code/webargs",
73 packages=find_packages("src"),
74 package_dir={"": "src"},
75 package_data={"webargs": ["py.typed"]},
76 install_requires=["marshmallow>=3.0.0", "packaging"],
77 extras_require=EXTRAS_REQUIRE,
78 license="MIT",
79 zip_safe=False,
80 keywords=(
81 "webargs",
82 "http",
83 "flask",
84 "django",
85 "bottle",
86 "tornado",
87 "aiohttp",
88 "request",
89 "arguments",
90 "validation",
91 "parameters",
92 "rest",
93 "api",
94 "marshmallow",
95 ),
96 python_requires=">=3.7",
97 classifiers=[
98 "Development Status :: 5 - Production/Stable",
99 "Intended Audience :: Developers",
100 "License :: OSI Approved :: MIT License",
101 "Natural Language :: English",
102 "Programming Language :: Python :: 3",
103 "Programming Language :: Python :: 3.7",
104 "Programming Language :: Python :: 3.8",
105 "Programming Language :: Python :: 3.9",
106 "Programming Language :: Python :: 3.10",
107 "Programming Language :: Python :: 3 :: Only",
108 "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
109 "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
110 ],
111 test_suite="tests",
112 project_urls={
113 "Changelog": "https://webargs.readthedocs.io/en/latest/changelog.html",
114 "Issues": "https://github.com/marshmallow-code/webargs/issues",
115 "Funding": "https://opencollective.com/marshmallow",
116 "Tidelift": "https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi", # noqa
117 },
118 )
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,7 @@
]
+ FRAMEWORKS,
"lint": [
- "mypy==0.910",
+ "mypy==0.930",
"flake8==4.0.1",
"flake8-bugbear==21.11.29",
"pre-commit~=2.4",
diff --git a/src/webargs/__init__.py b/src/webargs/__init__.py
--- a/src/webargs/__init__.py
+++ b/src/webargs/__init__.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from packaging.version import Version
from marshmallow.utils import missing
@@ -9,7 +11,9 @@
__version__ = "8.0.1"
__parsed_version__ = Version(__version__)
-__version_info__ = __parsed_version__.release
+__version_info__: tuple[int, int, int] | tuple[
+ int, int, int, str, int
+] = __parsed_version__.release # type: ignore[assignment]
if __parsed_version__.pre:
- __version_info__ += __parsed_version__.pre
+ __version_info__ += __parsed_version__.pre # type: ignore[assignment]
__all__ = ("ValidationError", "fields", "missing", "validate")
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -20,7 +20,7 @@\n ]\n + FRAMEWORKS,\n \"lint\": [\n- \"mypy==0.910\",\n+ \"mypy==0.930\",\n \"flake8==4.0.1\",\n \"flake8-bugbear==21.11.29\",\n \"pre-commit~=2.4\",\ndiff --git a/src/webargs/__init__.py b/src/webargs/__init__.py\n--- a/src/webargs/__init__.py\n+++ b/src/webargs/__init__.py\n@@ -1,3 +1,5 @@\n+from __future__ import annotations\n+\n from packaging.version import Version\n from marshmallow.utils import missing\n \n@@ -9,7 +11,9 @@\n \n __version__ = \"8.0.1\"\n __parsed_version__ = Version(__version__)\n-__version_info__ = __parsed_version__.release\n+__version_info__: tuple[int, int, int] | tuple[\n+ int, int, int, str, int\n+] = __parsed_version__.release # type: ignore[assignment]\n if __parsed_version__.pre:\n- __version_info__ += __parsed_version__.pre\n+ __version_info__ += __parsed_version__.pre # type: ignore[assignment]\n __all__ = (\"ValidationError\", \"fields\", \"missing\", \"validate\")\n", "issue": "typing issue with __version_info__ += __parsed_version__.pre\nmypy issue:\r\n\r\n```\r\n __version_info__ += __parsed_version__.pre\r\n```\r\n\r\n```\r\nsrc/webargs/__init__.py:14: error: Unsupported operand types for + (\"Tuple[int, ...]\" and \"Tuple[str, int]\")\r\n```\r\n\r\nNot sure what the problem is. I'm tempted to just add a `# type: ignore`. Any better idea, anyone?\n", "before_files": [{"content": "from packaging.version import Version\nfrom marshmallow.utils import missing\n\n# Make marshmallow's validation functions importable from webargs\nfrom marshmallow import validate\n\nfrom webargs.core import ValidationError\nfrom webargs import fields\n\n__version__ = \"8.0.1\"\n__parsed_version__ = Version(__version__)\n__version_info__ = __parsed_version__.release\nif __parsed_version__.pre:\n __version_info__ += __parsed_version__.pre\n__all__ = (\"ValidationError\", \"fields\", \"missing\", \"validate\")\n", "path": "src/webargs/__init__.py"}, {"content": "import re\nfrom setuptools import setup, find_packages\n\nFRAMEWORKS = [\n \"Flask>=0.12.5\",\n \"Django>=2.2.0\",\n \"bottle>=0.12.13\",\n \"tornado>=4.5.2\",\n \"pyramid>=1.9.1\",\n \"falcon>=2.0.0\",\n \"aiohttp>=3.0.8\",\n]\nEXTRAS_REQUIRE = {\n \"frameworks\": FRAMEWORKS,\n \"tests\": [\n \"pytest\",\n \"webtest==3.0.0\",\n \"webtest-aiohttp==2.0.0\",\n \"pytest-aiohttp>=0.3.0\",\n ]\n + FRAMEWORKS,\n \"lint\": [\n \"mypy==0.910\",\n \"flake8==4.0.1\",\n \"flake8-bugbear==21.11.29\",\n \"pre-commit~=2.4\",\n ],\n \"docs\": [\n \"Sphinx==4.3.2\",\n \"sphinx-issues==2.0.0\",\n \"furo==2022.1.2\",\n ]\n + FRAMEWORKS,\n}\nEXTRAS_REQUIRE[\"dev\"] = EXTRAS_REQUIRE[\"tests\"] + EXTRAS_REQUIRE[\"lint\"] + [\"tox\"]\n\n\ndef find_version(fname):\n \"\"\"Attempts to find the version number in the file names fname.\n Raises RuntimeError if not found.\n \"\"\"\n version = \"\"\n with open(fname) as fp:\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n for line in fp:\n m = reg.match(line)\n if m:\n version = m.group(1)\n break\n if not version:\n raise RuntimeError(\"Cannot find version information\")\n return version\n\n\ndef read(fname):\n with open(fname) as fp:\n content = fp.read()\n return content\n\n\nsetup(\n name=\"webargs\",\n version=find_version(\"src/webargs/__init__.py\"),\n description=(\n \"Declarative parsing and validation of HTTP request objects, \"\n \"with built-in support for popular web frameworks, including \"\n \"Flask, Django, Bottle, Tornado, Pyramid, Falcon, and aiohttp.\"\n ),\n 
long_description=read(\"README.rst\"),\n author=\"Steven Loria\",\n author_email=\"[email protected]\",\n url=\"https://github.com/marshmallow-code/webargs\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n package_data={\"webargs\": [\"py.typed\"]},\n install_requires=[\"marshmallow>=3.0.0\", \"packaging\"],\n extras_require=EXTRAS_REQUIRE,\n license=\"MIT\",\n zip_safe=False,\n keywords=(\n \"webargs\",\n \"http\",\n \"flask\",\n \"django\",\n \"bottle\",\n \"tornado\",\n \"aiohttp\",\n \"request\",\n \"arguments\",\n \"validation\",\n \"parameters\",\n \"rest\",\n \"api\",\n \"marshmallow\",\n ),\n python_requires=\">=3.7\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n test_suite=\"tests\",\n project_urls={\n \"Changelog\": \"https://webargs.readthedocs.io/en/latest/changelog.html\",\n \"Issues\": \"https://github.com/marshmallow-code/webargs/issues\",\n \"Funding\": \"https://opencollective.com/marshmallow\",\n \"Tidelift\": \"https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi\", # noqa\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom packaging.version import Version\nfrom marshmallow.utils import missing\n\n# Make marshmallow's validation functions importable from webargs\nfrom marshmallow import validate\n\nfrom webargs.core import ValidationError\nfrom webargs import fields\n\n__version__ = \"8.0.1\"\n__parsed_version__ = Version(__version__)\n__version_info__: tuple[int, int, int] | tuple[\n int, int, int, str, int\n] = __parsed_version__.release # type: ignore[assignment]\nif __parsed_version__.pre:\n __version_info__ += __parsed_version__.pre # type: ignore[assignment]\n__all__ = (\"ValidationError\", \"fields\", \"missing\", \"validate\")\n", "path": "src/webargs/__init__.py"}, {"content": "import re\nfrom setuptools import setup, find_packages\n\nFRAMEWORKS = [\n \"Flask>=0.12.5\",\n \"Django>=2.2.0\",\n \"bottle>=0.12.13\",\n \"tornado>=4.5.2\",\n \"pyramid>=1.9.1\",\n \"falcon>=2.0.0\",\n \"aiohttp>=3.0.8\",\n]\nEXTRAS_REQUIRE = {\n \"frameworks\": FRAMEWORKS,\n \"tests\": [\n \"pytest\",\n \"webtest==3.0.0\",\n \"webtest-aiohttp==2.0.0\",\n \"pytest-aiohttp>=0.3.0\",\n ]\n + FRAMEWORKS,\n \"lint\": [\n \"mypy==0.930\",\n \"flake8==4.0.1\",\n \"flake8-bugbear==21.11.29\",\n \"pre-commit~=2.4\",\n ],\n \"docs\": [\n \"Sphinx==4.3.2\",\n \"sphinx-issues==2.0.0\",\n \"furo==2022.1.2\",\n ]\n + FRAMEWORKS,\n}\nEXTRAS_REQUIRE[\"dev\"] = EXTRAS_REQUIRE[\"tests\"] + EXTRAS_REQUIRE[\"lint\"] + [\"tox\"]\n\n\ndef find_version(fname):\n \"\"\"Attempts to find the version number in the file names fname.\n Raises RuntimeError if not found.\n \"\"\"\n version = \"\"\n with open(fname) as fp:\n reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n for line in fp:\n m = reg.match(line)\n if m:\n version = m.group(1)\n break\n if not version:\n raise RuntimeError(\"Cannot find version information\")\n return version\n\n\ndef 
read(fname):\n with open(fname) as fp:\n content = fp.read()\n return content\n\n\nsetup(\n name=\"webargs\",\n version=find_version(\"src/webargs/__init__.py\"),\n description=(\n \"Declarative parsing and validation of HTTP request objects, \"\n \"with built-in support for popular web frameworks, including \"\n \"Flask, Django, Bottle, Tornado, Pyramid, Falcon, and aiohttp.\"\n ),\n long_description=read(\"README.rst\"),\n author=\"Steven Loria\",\n author_email=\"[email protected]\",\n url=\"https://github.com/marshmallow-code/webargs\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n package_data={\"webargs\": [\"py.typed\"]},\n install_requires=[\"marshmallow>=3.0.0\", \"packaging\"],\n extras_require=EXTRAS_REQUIRE,\n license=\"MIT\",\n zip_safe=False,\n keywords=(\n \"webargs\",\n \"http\",\n \"flask\",\n \"django\",\n \"bottle\",\n \"tornado\",\n \"aiohttp\",\n \"request\",\n \"arguments\",\n \"validation\",\n \"parameters\",\n \"rest\",\n \"api\",\n \"marshmallow\",\n ),\n python_requires=\">=3.7\",\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: WSGI :: Application\",\n ],\n test_suite=\"tests\",\n project_urls={\n \"Changelog\": \"https://webargs.readthedocs.io/en/latest/changelog.html\",\n \"Issues\": \"https://github.com/marshmallow-code/webargs/issues\",\n \"Funding\": \"https://opencollective.com/marshmallow\",\n \"Tidelift\": \"https://tidelift.com/subscription/pkg/pypi-webargs?utm_source=pypi-marshmallow&utm_medium=pypi\", # noqa\n },\n)\n", "path": "setup.py"}]} | 1,699 | 320 |
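The webargs record above closes with its verification blob; the substance of that patch is just widening one annotation so mypy accepts the in-place `+=` of a pre-release suffix. A minimal sketch of the patched module, mirroring the after-files content in the blob (the version string is the one from the record):

```python
from __future__ import annotations

from packaging.version import Version

__version__ = "8.0.1"
__parsed_version__ = Version(__version__)
# .release is (major, minor, micro); a pre-release such as "8.1.0b1" appends
# ("b", 1), so the annotation must allow both tuple shapes.
__version_info__: tuple[int, int, int] | tuple[
    int, int, int, str, int
] = __parsed_version__.release  # type: ignore[assignment]
if __parsed_version__.pre:
    __version_info__ += __parsed_version__.pre  # type: ignore[assignment]
```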
gh_patches_debug_11347 | rasdani/github-patches | git_diff | plotly__dash-999 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] + in version string breaks fingerprint system
**Describe your context**
- replace the result of `pip list | grep dash` below
```
dash 1.5.1
dash-core-components 1.4.0
dash-daq 0.2.2
dash-html-components 1.0.1
dash-renderer 1.2.0
dash-table 4.5.0
```
**Describe the bug**
When going from `dash==1.4` to `dash==1.5`, we experienced a breaking change in the custom Dash components we use.
It took some hours to debug, but the reason was found to be related to the new "fingerprint" system in Dash. In our project, we use the [setuptools_scm](https://github.com/pypa/setuptools_scm) package (by the Python Packaging Authority) in order to have a versioning system that automatically is linked to the git repo tags. This makes continuous deployment to e.g. Pypi easy and robust wrt. keeping versions consistent.
I.e. instead of
```python
__version__ = package['version']
```
in the component package, we use something like
```
__version__ = get_distribution(__name__).version
```
This worked until `dash==1.5`; then it broke on non-release versions due to automatic tags of the type
`1.0.0.dev5+af4304c.d20191103`, where the tag includes a `+`. See [the default tag formats](https://github.com/pypa/setuptools_scm#default-versioning-scheme).
Changing the line above to
```
__version__ = get_distribution(__name__).version.replace("+", ".")
```
is one workaround that gets the third party components to also work on `dash==1.5`
**Expected behavior**
`setuptools_scm` provided versions to work also in `dash>=1.5`.
**Suggested solution**
Change [this line](https://github.com/plotly/dash/blob/40b5357f262ac207f94ac980e6cb928d94df65b7/dash/fingerprint.py#L12) in Dash's `build_fingerprint` to also replace `+` with `_`?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dash/fingerprint.py`
Content:
```
1 import re
2
3 cache_regex = re.compile(r"^v[\w-]+m[0-9a-fA-F]+$")
4
5
6 def build_fingerprint(path, version, hash_value):
7 path_parts = path.split("/")
8 filename, extension = path_parts[-1].split(".", 1)
9
10 return "{}.v{}m{}.{}".format(
11 "/".join(path_parts[:-1] + [filename]),
12 str(version).replace(".", "_"),
13 hash_value,
14 extension,
15 )
16
17
18 def check_fingerprint(path):
19 path_parts = path.split("/")
20 name_parts = path_parts[-1].split(".")
21
22 # Check if the resource has a fingerprint
23 if len(name_parts) > 2 and cache_regex.match(name_parts[1]):
24 original_name = ".".join([name_parts[0]] + name_parts[2:])
25 return "/".join(path_parts[:-1] + [original_name]), True
26
27 return path, False
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dash/fingerprint.py b/dash/fingerprint.py
--- a/dash/fingerprint.py
+++ b/dash/fingerprint.py
@@ -1,7 +1,7 @@
import re
cache_regex = re.compile(r"^v[\w-]+m[0-9a-fA-F]+$")
-
+version_clean = re.compile(r"[^\w-]")
def build_fingerprint(path, version, hash_value):
path_parts = path.split("/")
@@ -9,7 +9,7 @@
return "{}.v{}m{}.{}".format(
"/".join(path_parts[:-1] + [filename]),
- str(version).replace(".", "_"),
+ re.sub(version_clean, "_", str(version)),
hash_value,
extension,
)
| {"golden_diff": "diff --git a/dash/fingerprint.py b/dash/fingerprint.py\n--- a/dash/fingerprint.py\n+++ b/dash/fingerprint.py\n@@ -1,7 +1,7 @@\n import re\n \n cache_regex = re.compile(r\"^v[\\w-]+m[0-9a-fA-F]+$\")\n-\n+version_clean = re.compile(r\"[^\\w-]\")\n \n def build_fingerprint(path, version, hash_value):\n path_parts = path.split(\"/\")\n@@ -9,7 +9,7 @@\n \n return \"{}.v{}m{}.{}\".format(\n \"/\".join(path_parts[:-1] + [filename]),\n- str(version).replace(\".\", \"_\"),\n+ re.sub(version_clean, \"_\", str(version)),\n hash_value,\n extension,\n )\n", "issue": "[BUG] + in version string breaks fingerprint system\n**Describe your context**\r\n- replace the result of `pip list | grep dash` below\r\n```\r\ndash 1.5.1 \r\ndash-core-components 1.4.0 \r\ndash-daq 0.2.2 \r\ndash-html-components 1.0.1 \r\ndash-renderer 1.2.0 \r\ndash-table 4.5.0 \r\n```\r\n\r\n**Describe the bug**\r\n\r\nWhen going from `dash==1.4` to `dash==1.5`, we experienced a breaking change in the custom Dash components we use.\r\n\r\nIt took some hours to debug, but the reason was found to be related to the new \"fingerprint\" system in Dash. In our project, we use the [setuptools_scm](https://github.com/pypa/setuptools_scm) package (by the Python Packaging Authority) in order to have a versioning system that automatically is linked to the git repo tags. This makes continuous deployment to e.g. Pypi easy and robust wrt. keeping versions consistent.\r\n\r\nI.e. instead of\r\n```python\r\n__version__ = package['version']\r\n```\r\nin the component package, we use something like\r\n```\r\n__version__ = get_distribution(__name__).version\r\n```\r\nThis worked until `dash==1.5`, then it broke on non-release-versions due to automatic tags of the type\r\n`1.0.0.dev5+af4304c.d20191103`, where the tag includes a `+`. 
See [the default tag formats](https://github.com/pypa/setuptools_scm#default-versioning-scheme).\r\n\r\nChanging the line above to\r\n```\r\n__version__ = get_distribution(__name__).version.replace(\"+\", \".\")\r\n```\r\nis one workaround that gets the third party components to also work on `dash==1.5`\r\n\r\n**Expected behavior**\r\n\r\n`setuptools_scm` provided versions to work also in `dash>=1.5`.\r\n\r\n**Suggested solution**\r\n\r\nChange [this line](https://github.com/plotly/dash/blob/40b5357f262ac207f94ac980e6cb928d94df65b7/dash/fingerprint.py#L12) in Dash's `build_fingerprint` to also replace `+` with `_`?\n", "before_files": [{"content": "import re\n\ncache_regex = re.compile(r\"^v[\\w-]+m[0-9a-fA-F]+$\")\n\n\ndef build_fingerprint(path, version, hash_value):\n path_parts = path.split(\"/\")\n filename, extension = path_parts[-1].split(\".\", 1)\n\n return \"{}.v{}m{}.{}\".format(\n \"/\".join(path_parts[:-1] + [filename]),\n str(version).replace(\".\", \"_\"),\n hash_value,\n extension,\n )\n\n\ndef check_fingerprint(path):\n path_parts = path.split(\"/\")\n name_parts = path_parts[-1].split(\".\")\n\n # Check if the resource has a fingerprint\n if len(name_parts) > 2 and cache_regex.match(name_parts[1]):\n original_name = \".\".join([name_parts[0]] + name_parts[2:])\n return \"/\".join(path_parts[:-1] + [original_name]), True\n\n return path, False\n", "path": "dash/fingerprint.py"}], "after_files": [{"content": "import re\n\ncache_regex = re.compile(r\"^v[\\w-]+m[0-9a-fA-F]+$\")\nversion_clean = re.compile(r\"[^\\w-]\")\n\ndef build_fingerprint(path, version, hash_value):\n path_parts = path.split(\"/\")\n filename, extension = path_parts[-1].split(\".\", 1)\n\n return \"{}.v{}m{}.{}\".format(\n \"/\".join(path_parts[:-1] + [filename]),\n re.sub(version_clean, \"_\", str(version)),\n hash_value,\n extension,\n )\n\n\ndef check_fingerprint(path):\n path_parts = path.split(\"/\")\n name_parts = path_parts[-1].split(\".\")\n\n # Check if the resource has a fingerprint\n if len(name_parts) > 2 and cache_regex.match(name_parts[1]):\n original_name = \".\".join([name_parts[0]] + name_parts[2:])\n return \"/\".join(path_parts[:-1] + [original_name]), True\n\n return path, False\n", "path": "dash/fingerprint.py"}]} | 1,031 | 165 |
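To see why the `version_clean` regex in the diff above fixes the setuptools_scm case: it maps every character outside `[A-Za-z0-9_-]`, not only dots, to underscores before the version is embedded in the fingerprint, so the result still matches `cache_regex`. A small sketch reusing the patched logic (the second version string is the one quoted in the issue):

```python
import re

version_clean = re.compile(r"[^\w-]")

def clean_version(version) -> str:
    # Anything outside [A-Za-z0-9_-] becomes "_", so the fingerprint still
    # matches cache_regex (r"^v[\w-]+m[0-9a-fA-F]+$") when checked later.
    return re.sub(version_clean, "_", str(version))

print(clean_version("1.5.1"))                         # 1_5_1
print(clean_version("1.0.0.dev5+af4304c.d20191103"))  # 1_0_0_dev5_af4304c_d20191103
```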
gh_patches_debug_175 | rasdani/github-patches | git_diff | open-mmlab__mmengine-684 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
config/utils.py doesn't include mmyolo

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmengine/config/utils.py`
Content:
```
1 # Copyright (c) OpenMMLab. All rights reserved.
2 import ast
3 import os.path as osp
4 import re
5 import warnings
6 from typing import Tuple
7
8 from mmengine.fileio import load
9 from mmengine.utils import check_file_exist
10
11 PKG2PROJECT = {
12 'mmcls': 'mmcls',
13 'mmdet': 'mmdet',
14 'mmdet3d': 'mmdet3d',
15 'mmseg': 'mmsegmentation',
16 'mmaction2': 'mmaction2',
17 'mmtrack': 'mmtrack',
18 'mmpose': 'mmpose',
19 'mmedit': 'mmedit',
20 'mmocr': 'mmocr',
21 'mmgen': 'mmgen',
22 'mmfewshot': 'mmfewshot',
23 'mmrazor': 'mmrazor',
24 'mmflow': 'mmflow',
25 'mmhuman3d': 'mmhuman3d',
26 'mmrotate': 'mmrotate',
27 'mmselfsup': 'mmselfsup',
28 }
29
30
31 def _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:
32 """Get target meta information from all 'metafile.yml' defined in `mode-
33 index.yml` of external package.
34
35 Args:
36 package_path (str): Path of external package.
37 cfg_path (str): Name of experiment config.
38
39 Returns:
40 dict: Meta information of target experiment.
41 """
42 meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')
43 meta_index = load(meta_index_path)
44 cfg_dict = dict()
45 for meta_path in meta_index['Import']:
46 meta_path = osp.join(package_path, '.mim', meta_path)
47 cfg_meta = load(meta_path)
48 for model_cfg in cfg_meta['Models']:
49 if 'Config' not in model_cfg:
50 warnings.warn(f'There is not `Config` define in {model_cfg}')
51 continue
52 cfg_name = model_cfg['Config'].partition('/')[-1]
53 # Some config could have multiple weights, we only pick the
54 # first one.
55 if cfg_name in cfg_dict:
56 continue
57 cfg_dict[cfg_name] = model_cfg
58 if cfg_path not in cfg_dict:
59 raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '
60 f'{cfg_path}')
61 return cfg_dict[cfg_path]
62
63
64 def _get_external_cfg_path(package_path: str, cfg_file: str) -> str:
65 """Get config path of external package.
66
67 Args:
68 package_path (str): Path of external package.
69 cfg_file (str): Name of experiment config.
70
71 Returns:
72 str: Absolute config path from external package.
73 """
74 cfg_file = cfg_file.split('.')[0]
75 model_cfg = _get_cfg_metainfo(package_path, cfg_file)
76 cfg_path = osp.join(package_path, model_cfg['Config'])
77 check_file_exist(cfg_path)
78 return cfg_path
79
80
81 def _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:
82 """Get base config path of external package.
83
84 Args:
85 package_path (str): Path of external package.
86 cfg_name (str): External relative config path with 'package::'.
87
88 Returns:
89 str: Absolute config path from external package.
90 """
91 cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)
92 check_file_exist(cfg_path)
93 return cfg_path
94
95
96 def _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:
97 """Get package name and relative config path.
98
99 Args:
100 cfg_path (str): External relative config path with 'package::'.
101
102 Returns:
103 Tuple[str, str]: Package name and config path.
104 """
105 if re.match(r'\w*::\w*/\w*', cfg_path) is None:
106 raise ValueError(
107 '`_get_package_and_cfg_path` is used for get external package, '
108 'please specify the package name and relative config path, just '
109 'like `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')
110 package_cfg = cfg_path.split('::')
111 if len(package_cfg) > 2:
112 raise ValueError('`::` should only be used to separate package and '
113 'config name, but found multiple `::` in '
114 f'{cfg_path}')
115 package, cfg_path = package_cfg
116 assert package in PKG2PROJECT, 'mmengine does not support to load ' \
117 f'{package} config.'
118 package = PKG2PROJECT[package]
119 return package, cfg_path
120
121
122 class RemoveAssignFromAST(ast.NodeTransformer):
123 """Remove Assign node if the target's name match the key.
124
125 Args:
126 key (str): The target name of the Assign node.
127 """
128
129 def __init__(self, key):
130 self.key = key
131
132 def visit_Assign(self, node):
133 if (isinstance(node.targets[0], ast.Name)
134 and node.targets[0].id == self.key):
135 return None
136 else:
137 return node
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmengine/config/utils.py b/mmengine/config/utils.py
--- a/mmengine/config/utils.py
+++ b/mmengine/config/utils.py
@@ -25,6 +25,7 @@
'mmhuman3d': 'mmhuman3d',
'mmrotate': 'mmrotate',
'mmselfsup': 'mmselfsup',
+ 'mmyolo': 'mmyolo',
}
| {"golden_diff": "diff --git a/mmengine/config/utils.py b/mmengine/config/utils.py\n--- a/mmengine/config/utils.py\n+++ b/mmengine/config/utils.py\n@@ -25,6 +25,7 @@\n 'mmhuman3d': 'mmhuman3d',\n 'mmrotate': 'mmrotate',\n 'mmselfsup': 'mmselfsup',\n+ 'mmyolo': 'mmyolo',\n }\n", "issue": "config/utils.py haven't mmyolo\n\r\n\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport ast\nimport os.path as osp\nimport re\nimport warnings\nfrom typing import Tuple\n\nfrom mmengine.fileio import load\nfrom mmengine.utils import check_file_exist\n\nPKG2PROJECT = {\n 'mmcls': 'mmcls',\n 'mmdet': 'mmdet',\n 'mmdet3d': 'mmdet3d',\n 'mmseg': 'mmsegmentation',\n 'mmaction2': 'mmaction2',\n 'mmtrack': 'mmtrack',\n 'mmpose': 'mmpose',\n 'mmedit': 'mmedit',\n 'mmocr': 'mmocr',\n 'mmgen': 'mmgen',\n 'mmfewshot': 'mmfewshot',\n 'mmrazor': 'mmrazor',\n 'mmflow': 'mmflow',\n 'mmhuman3d': 'mmhuman3d',\n 'mmrotate': 'mmrotate',\n 'mmselfsup': 'mmselfsup',\n}\n\n\ndef _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:\n \"\"\"Get target meta information from all 'metafile.yml' defined in `mode-\n index.yml` of external package.\n\n Args:\n package_path (str): Path of external package.\n cfg_path (str): Name of experiment config.\n\n Returns:\n dict: Meta information of target experiment.\n \"\"\"\n meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')\n meta_index = load(meta_index_path)\n cfg_dict = dict()\n for meta_path in meta_index['Import']:\n meta_path = osp.join(package_path, '.mim', meta_path)\n cfg_meta = load(meta_path)\n for model_cfg in cfg_meta['Models']:\n if 'Config' not in model_cfg:\n warnings.warn(f'There is not `Config` define in {model_cfg}')\n continue\n cfg_name = model_cfg['Config'].partition('/')[-1]\n # Some config could have multiple weights, we only pick the\n # first one.\n if cfg_name in cfg_dict:\n continue\n cfg_dict[cfg_name] = model_cfg\n if cfg_path not in cfg_dict:\n raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '\n f'{cfg_path}')\n return cfg_dict[cfg_path]\n\n\ndef _get_external_cfg_path(package_path: str, cfg_file: str) -> str:\n \"\"\"Get config path of external package.\n\n Args:\n package_path (str): Path of external package.\n cfg_file (str): Name of experiment config.\n\n Returns:\n str: Absolute config path from external package.\n \"\"\"\n cfg_file = cfg_file.split('.')[0]\n model_cfg = _get_cfg_metainfo(package_path, cfg_file)\n cfg_path = osp.join(package_path, model_cfg['Config'])\n check_file_exist(cfg_path)\n return cfg_path\n\n\ndef _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:\n \"\"\"Get base config path of external package.\n\n Args:\n package_path (str): Path of external package.\n cfg_name (str): External relative config path with 'package::'.\n\n Returns:\n str: Absolute config path from external package.\n \"\"\"\n cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)\n check_file_exist(cfg_path)\n return cfg_path\n\n\ndef _get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:\n \"\"\"Get package name and relative config path.\n\n Args:\n cfg_path (str): External relative config path with 'package::'.\n\n Returns:\n Tuple[str, str]: Package name and config path.\n \"\"\"\n if re.match(r'\\w*::\\w*/\\w*', cfg_path) is None:\n raise ValueError(\n '`_get_package_and_cfg_path` is used for get external package, '\n 'please specify the package name and relative config path, just '\n 'like `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')\n 
package_cfg = cfg_path.split('::')\n if len(package_cfg) > 2:\n raise ValueError('`::` should only be used to separate package and '\n 'config name, but found multiple `::` in '\n f'{cfg_path}')\n package, cfg_path = package_cfg\n assert package in PKG2PROJECT, 'mmengine does not support to load ' \\\n f'{package} config.'\n package = PKG2PROJECT[package]\n return package, cfg_path\n\n\nclass RemoveAssignFromAST(ast.NodeTransformer):\n \"\"\"Remove Assign node if the target's name match the key.\n\n Args:\n key (str): The target name of the Assign node.\n \"\"\"\n\n def __init__(self, key):\n self.key = key\n\n def visit_Assign(self, node):\n if (isinstance(node.targets[0], ast.Name)\n and node.targets[0].id == self.key):\n return None\n else:\n return node\n", "path": "mmengine/config/utils.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport ast\nimport os.path as osp\nimport re\nimport warnings\nfrom typing import Tuple\n\nfrom mmengine.fileio import load\nfrom mmengine.utils import check_file_exist\n\nPKG2PROJECT = {\n 'mmcls': 'mmcls',\n 'mmdet': 'mmdet',\n 'mmdet3d': 'mmdet3d',\n 'mmseg': 'mmsegmentation',\n 'mmaction2': 'mmaction2',\n 'mmtrack': 'mmtrack',\n 'mmpose': 'mmpose',\n 'mmedit': 'mmedit',\n 'mmocr': 'mmocr',\n 'mmgen': 'mmgen',\n 'mmfewshot': 'mmfewshot',\n 'mmrazor': 'mmrazor',\n 'mmflow': 'mmflow',\n 'mmhuman3d': 'mmhuman3d',\n 'mmrotate': 'mmrotate',\n 'mmselfsup': 'mmselfsup',\n 'mmyolo': 'mmyolo',\n}\n\n\ndef _get_cfg_metainfo(package_path: str, cfg_path: str) -> dict:\n \"\"\"Get target meta information from all 'metafile.yml' defined in `mode-\n index.yml` of external package.\n\n Args:\n package_path (str): Path of external package.\n cfg_path (str): Name of experiment config.\n\n Returns:\n dict: Meta information of target experiment.\n \"\"\"\n meta_index_path = osp.join(package_path, '.mim', 'model-index.yml')\n meta_index = load(meta_index_path)\n cfg_dict = dict()\n for meta_path in meta_index['Import']:\n meta_path = osp.join(package_path, '.mim', meta_path)\n cfg_meta = load(meta_path)\n for model_cfg in cfg_meta['Models']:\n if 'Config' not in model_cfg:\n warnings.warn(f'There is not `Config` define in {model_cfg}')\n continue\n cfg_name = model_cfg['Config'].partition('/')[-1]\n # Some config could have multiple weights, we only pick the\n # first one.\n if cfg_name in cfg_dict:\n continue\n cfg_dict[cfg_name] = model_cfg\n if cfg_path not in cfg_dict:\n raise ValueError(f'Expected configs: {cfg_dict.keys()}, but got '\n f'{cfg_path}')\n return cfg_dict[cfg_path]\n\n\ndef _get_external_cfg_path(package_path: str, cfg_file: str) -> str:\n \"\"\"Get config path of external package.\n\n Args:\n package_path (str): Path of external package.\n cfg_file (str): Name of experiment config.\n\n Returns:\n str: Absolute config path from external package.\n \"\"\"\n cfg_file = cfg_file.split('.')[0]\n model_cfg = _get_cfg_metainfo(package_path, cfg_file)\n cfg_path = osp.join(package_path, model_cfg['Config'])\n check_file_exist(cfg_path)\n return cfg_path\n\n\ndef _get_external_cfg_base_path(package_path: str, cfg_name: str) -> str:\n \"\"\"Get base config path of external package.\n\n Args:\n package_path (str): Path of external package.\n cfg_name (str): External relative config path with 'package::'.\n\n Returns:\n str: Absolute config path from external package.\n \"\"\"\n cfg_path = osp.join(package_path, '.mim', 'configs', cfg_name)\n check_file_exist(cfg_path)\n return cfg_path\n\n\ndef 
_get_package_and_cfg_path(cfg_path: str) -> Tuple[str, str]:\n \"\"\"Get package name and relative config path.\n\n Args:\n cfg_path (str): External relative config path with 'package::'.\n\n Returns:\n Tuple[str, str]: Package name and config path.\n \"\"\"\n if re.match(r'\\w*::\\w*/\\w*', cfg_path) is None:\n raise ValueError(\n '`_get_package_and_cfg_path` is used for get external package, '\n 'please specify the package name and relative config path, just '\n 'like `mmdet::faster_rcnn/faster-rcnn_r50_fpn_1x_coco.py`')\n package_cfg = cfg_path.split('::')\n if len(package_cfg) > 2:\n raise ValueError('`::` should only be used to separate package and '\n 'config name, but found multiple `::` in '\n f'{cfg_path}')\n package, cfg_path = package_cfg\n assert package in PKG2PROJECT, 'mmengine does not support to load ' \\\n f'{package} config.'\n package = PKG2PROJECT[package]\n return package, cfg_path\n\n\nclass RemoveAssignFromAST(ast.NodeTransformer):\n \"\"\"Remove Assign node if the target's name match the key.\n\n Args:\n key (str): The target name of the Assign node.\n \"\"\"\n\n def __init__(self, key):\n self.key = key\n\n def visit_Assign(self, node):\n if (isinstance(node.targets[0], ast.Name)\n and node.targets[0].id == self.key):\n return None\n else:\n return node\n", "path": "mmengine/config/utils.py"}]} | 1,777 | 90 |
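To make the effect of the one-line mmengine patch concrete: once `'mmyolo': 'mmyolo'` is in `PKG2PROJECT`, package-prefixed config paths for mmyolo pass the assertion in `_get_package_and_cfg_path` instead of failing. A hedged sketch; the config file name below is invented for illustration:

```python
from mmengine.config.utils import _get_package_and_cfg_path

# Any "mmyolo::<dir>/<file>.py" path resolves once 'mmyolo' is in PKG2PROJECT;
# the concrete file name here is made up.
package, cfg_path = _get_package_and_cfg_path(
    "mmyolo::yolov5/yolov5_s_example_coco.py"
)
print(package)   # mmyolo
print(cfg_path)  # yolov5/yolov5_s_example_coco.py
```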
gh_patches_debug_58655 | rasdani/github-patches | git_diff | Anselmoo__spectrafit-715 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature]: Add python 3.11 support
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Missing Feature
Add python 3.11 support
### Possible Solution
_No response_
### Anything else?
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spectrafit/__init__.py`
Content:
```
1 """SpectraFit, fast command line tool for fitting data."""
2 __version__ = "0.16.6"
3
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py
--- a/spectrafit/__init__.py
+++ b/spectrafit/__init__.py
@@ -1,2 +1,2 @@
"""SpectraFit, fast command line tool for fitting data."""
-__version__ = "0.16.6"
+__version__ = "0.16.7"
| {"golden_diff": "diff --git a/spectrafit/__init__.py b/spectrafit/__init__.py\n--- a/spectrafit/__init__.py\n+++ b/spectrafit/__init__.py\n@@ -1,2 +1,2 @@\n \"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n-__version__ = \"0.16.6\"\n+__version__ = \"0.16.7\"\n", "issue": "[Feature]: Add python 3.11 support\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Missing Feature\n\nAdd python 3.11 support\n\n### Possible Solution\n\n_No response_\n\n### Anything else?\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"0.16.6\"\n", "path": "spectrafit/__init__.py"}], "after_files": [{"content": "\"\"\"SpectraFit, fast command line tool for fitting data.\"\"\"\n__version__ = \"0.16.7\"\n", "path": "spectrafit/__init__.py"}]} | 370 | 94 |
gh_patches_debug_2025 | rasdani/github-patches | git_diff | pre-commit__pre-commit-2836 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Alternative to stashing files for testing
Are there any plans to implement alternatives to stashing the worktree?
Ideally this would be hook/scriptable, like some 'prepare-worktree' and 'restore-worktree' options (which default to the current stash behavior) but can also yield some new directory where the tests are run. The rationale here is that my editor reverts files changed on disk and I'd like to add notes to source files while the commit is in progress.
In my own pre-commit hooks I use something like:
git archive "$(git write-tree)" --prefix="$test_dir/" | tar xf -
To create a pristine source tree (actually, I also prime it with `cp -rl` with build artifacts from the previous build to speed up incremental builds). 'git-worktree' and other tools could be used as well...
Eventually I have the idea to run some (more expensive) pre-commit checks in the background while one types the commit message. Then, in the commit-msg hook, wait for the background results and abort the commit there. This should reduce turnaround times significantly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/languages/swift.py`
Content:
```
1 from __future__ import annotations
2
3 import contextlib
4 import os
5 from typing import Generator
6 from typing import Sequence
7
8 from pre_commit import lang_base
9 from pre_commit.envcontext import envcontext
10 from pre_commit.envcontext import PatchesT
11 from pre_commit.envcontext import Var
12 from pre_commit.prefix import Prefix
13 from pre_commit.util import cmd_output_b
14
15 BUILD_DIR = '.build'
16 BUILD_CONFIG = 'release'
17
18 ENVIRONMENT_DIR = 'swift_env'
19 get_default_version = lang_base.basic_get_default_version
20 health_check = lang_base.basic_health_check
21 run_hook = lang_base.basic_run_hook
22
23
24 def get_env_patch(venv: str) -> PatchesT: # pragma: win32 no cover
25 bin_path = os.path.join(venv, BUILD_DIR, BUILD_CONFIG)
26 return (('PATH', (bin_path, os.pathsep, Var('PATH'))),)
27
28
29 @contextlib.contextmanager # pragma: win32 no cover
30 def in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:
31 envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)
32 with envcontext(get_env_patch(envdir)):
33 yield
34
35
36 def install_environment(
37 prefix: Prefix, version: str, additional_dependencies: Sequence[str],
38 ) -> None: # pragma: win32 no cover
39 lang_base.assert_version_default('swift', version)
40 lang_base.assert_no_additional_deps('swift', additional_dependencies)
41 envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)
42
43 # Build the swift package
44 os.mkdir(envdir)
45 cmd_output_b(
46 'swift', 'build',
47 '-C', prefix.prefix_dir,
48 '-c', BUILD_CONFIG,
49 '--build-path', os.path.join(envdir, BUILD_DIR),
50 )
51
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/languages/swift.py b/pre_commit/languages/swift.py
--- a/pre_commit/languages/swift.py
+++ b/pre_commit/languages/swift.py
@@ -44,7 +44,7 @@
os.mkdir(envdir)
cmd_output_b(
'swift', 'build',
- '-C', prefix.prefix_dir,
+ '--package-path', prefix.prefix_dir,
'-c', BUILD_CONFIG,
'--build-path', os.path.join(envdir, BUILD_DIR),
)
| {"golden_diff": "diff --git a/pre_commit/languages/swift.py b/pre_commit/languages/swift.py\n--- a/pre_commit/languages/swift.py\n+++ b/pre_commit/languages/swift.py\n@@ -44,7 +44,7 @@\n os.mkdir(envdir)\n cmd_output_b(\n 'swift', 'build',\n- '-C', prefix.prefix_dir,\n+ '--package-path', prefix.prefix_dir,\n '-c', BUILD_CONFIG,\n '--build-path', os.path.join(envdir, BUILD_DIR),\n )\n", "issue": "Alternative to stashing files for testing\nAre there any plans to implement alternatives to stashing the worktree?\r\n\r\nIdeally this would be hook/scriptable, like some 'prepare-worktree' and 'restore-worktree' options (which default to the current stash behavior) but can also yield some new directory where the tests are run. The rationale here is that my editor reverts files changed on disk and I'd like to add notes to source files while the commit is in progress.\r\n\r\nIn my own pre-commit hooks I use something like:\r\n\r\n git archive \"$(git write-tree)\" --prefix=\"$test_dir/\" | tar xf -\r\n\r\nTo create a pristine source tree (actually, I also prime it with `cp -rl` with build artifacts from the previous build to speed up incremental builds). 'git-worktree' and other tools could be used as well...\r\n\r\nEventually I have the idea to run some (more expensive) pre-commit checks in the background while one types the commit message. Then in the commit-msg hook wait for the background results and abort the commit there. This should reduce the turn around times significantly.\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport os\nfrom typing import Generator\nfrom typing import Sequence\n\nfrom pre_commit import lang_base\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output_b\n\nBUILD_DIR = '.build'\nBUILD_CONFIG = 'release'\n\nENVIRONMENT_DIR = 'swift_env'\nget_default_version = lang_base.basic_get_default_version\nhealth_check = lang_base.basic_health_check\nrun_hook = lang_base.basic_run_hook\n\n\ndef get_env_patch(venv: str) -> PatchesT: # pragma: win32 no cover\n bin_path = os.path.join(venv, BUILD_DIR, BUILD_CONFIG)\n return (('PATH', (bin_path, os.pathsep, Var('PATH'))),)\n\n\[email protected] # pragma: win32 no cover\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None: # pragma: win32 no cover\n lang_base.assert_version_default('swift', version)\n lang_base.assert_no_additional_deps('swift', additional_dependencies)\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n # Build the swift package\n os.mkdir(envdir)\n cmd_output_b(\n 'swift', 'build',\n '-C', prefix.prefix_dir,\n '-c', BUILD_CONFIG,\n '--build-path', os.path.join(envdir, BUILD_DIR),\n )\n", "path": "pre_commit/languages/swift.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport os\nfrom typing import Generator\nfrom typing import Sequence\n\nfrom pre_commit import lang_base\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import cmd_output_b\n\nBUILD_DIR 
= '.build'\nBUILD_CONFIG = 'release'\n\nENVIRONMENT_DIR = 'swift_env'\nget_default_version = lang_base.basic_get_default_version\nhealth_check = lang_base.basic_health_check\nrun_hook = lang_base.basic_run_hook\n\n\ndef get_env_patch(venv: str) -> PatchesT: # pragma: win32 no cover\n bin_path = os.path.join(venv, BUILD_DIR, BUILD_CONFIG)\n return (('PATH', (bin_path, os.pathsep, Var('PATH'))),)\n\n\[email protected] # pragma: win32 no cover\ndef in_env(prefix: Prefix, version: str) -> Generator[None, None, None]:\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef install_environment(\n prefix: Prefix, version: str, additional_dependencies: Sequence[str],\n) -> None: # pragma: win32 no cover\n lang_base.assert_version_default('swift', version)\n lang_base.assert_no_additional_deps('swift', additional_dependencies)\n envdir = lang_base.environment_dir(prefix, ENVIRONMENT_DIR, version)\n\n # Build the swift package\n os.mkdir(envdir)\n cmd_output_b(\n 'swift', 'build',\n '--package-path', prefix.prefix_dir,\n '-c', BUILD_CONFIG,\n '--build-path', os.path.join(envdir, BUILD_DIR),\n )\n", "path": "pre_commit/languages/swift.py"}]} | 972 | 112 |
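The pre-commit issue above already contains the core trick, `git archive "$(git write-tree)" --prefix=... | tar xf -`. A hedged Python translation of that idea, not part of pre-commit itself (the prepare/restore-worktree hook points the issue asks for remain hypothetical):

```python
import subprocess
import tempfile

def export_staged_tree() -> str:
    """Materialize the staged (index) tree in a scratch directory."""
    test_dir = tempfile.mkdtemp(prefix="pre-commit-tree-")
    # git write-tree records the current index as a tree object and prints its id.
    tree = subprocess.check_output(["git", "write-tree"], text=True).strip()
    # git archive emits that tree as a tar stream, which tar unpacks into test_dir.
    archive = subprocess.run(
        ["git", "archive", tree], check=True, stdout=subprocess.PIPE
    ).stdout
    subprocess.run(["tar", "xf", "-", "-C", test_dir], input=archive, check=True)
    return test_dir
```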
gh_patches_debug_16451 | rasdani/github-patches | git_diff | getredash__redash-602 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API keys should be supported in the HTTP headers
Currently it seems that all API calls must include the `api_key` in the query string. Ideally the HTTP headers could also be used (e.g. `Authorization: Key XXXX` or `X-Api-Key`) so that Web server logs don't log the API key in the clear.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/authentication.py`
Content:
```
1 import hashlib
2 import hmac
3 import time
4 import logging
5
6 from flask.ext.login import LoginManager
7 from flask.ext.login import user_logged_in
8
9 from redash import models, settings, google_oauth, saml_auth
10 from redash.tasks import record_event
11
12 login_manager = LoginManager()
13 logger = logging.getLogger('authentication')
14
15
16 def sign(key, path, expires):
17 if not key:
18 return None
19
20 h = hmac.new(str(key), msg=path, digestmod=hashlib.sha1)
21 h.update(str(expires))
22
23 return h.hexdigest()
24
25
26 @login_manager.user_loader
27 def load_user(user_id):
28 return models.User.get_by_id(user_id)
29
30
31 def hmac_load_user_from_request(request):
32 signature = request.args.get('signature')
33 expires = float(request.args.get('expires') or 0)
34 query_id = request.view_args.get('query_id', None)
35 user_id = request.args.get('user_id', None)
36
37 # TODO: 3600 should be a setting
38 if signature and time.time() < expires <= time.time() + 3600:
39 if user_id:
40 user = models.User.get_by_id(user_id)
41 calculated_signature = sign(user.api_key, request.path, expires)
42
43 if user.api_key and signature == calculated_signature:
44 return user
45
46 if query_id:
47 query = models.Query.get(models.Query.id == query_id)
48 calculated_signature = sign(query.api_key, request.path, expires)
49
50 if query.api_key and signature == calculated_signature:
51 return models.ApiUser(query.api_key)
52
53 return None
54
55 def get_user_from_api_key(api_key, query_id):
56 if not api_key:
57 return None
58
59 user = None
60 try:
61 user = models.User.get_by_api_key(api_key)
62 except models.User.DoesNotExist:
63 if query_id:
64 query = models.Query.get_by_id(query_id)
65 if query and query.api_key == api_key:
66 user = models.ApiUser(api_key)
67
68 return user
69
70 def api_key_load_user_from_request(request):
71 api_key = request.args.get('api_key', None)
72 query_id = request.view_args.get('query_id', None)
73
74 user = get_user_from_api_key(api_key, query_id)
75 return user
76
77
78 def log_user_logged_in(app, user):
79 event = {
80 'user_id': user.id,
81 'action': 'login',
82 'object_type': 'redash',
83 'timestamp': int(time.time()),
84 }
85
86 record_event.delay(event)
87
88
89 def setup_authentication(app):
90 login_manager.init_app(app)
91 login_manager.anonymous_user = models.AnonymousUser
92 login_manager.login_view = 'login'
93 app.secret_key = settings.COOKIE_SECRET
94 app.register_blueprint(google_oauth.blueprint)
95 app.register_blueprint(saml_auth.blueprint)
96
97 user_logged_in.connect(log_user_logged_in)
98
99 if settings.AUTH_TYPE == 'hmac':
100 login_manager.request_loader(hmac_load_user_from_request)
101 elif settings.AUTH_TYPE == 'api_key':
102 login_manager.request_loader(api_key_load_user_from_request)
103 else:
104 logger.warning("Unknown authentication type ({}). Using default (HMAC).".format(settings.AUTH_TYPE))
105 login_manager.request_loader(hmac_load_user_from_request)
106
107
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/authentication.py b/redash/authentication.py
--- a/redash/authentication.py
+++ b/redash/authentication.py
@@ -52,6 +52,7 @@
return None
+
def get_user_from_api_key(api_key, query_id):
if not api_key:
return None
@@ -67,8 +68,19 @@
return user
-def api_key_load_user_from_request(request):
+
+def get_api_key_from_request(request):
api_key = request.args.get('api_key', None)
+
+ if api_key is None and request.headers.get('Authorization'):
+ auth_header = request.headers.get('Authorization')
+ api_key = auth_header.replace('Key ', '', 1)
+
+ return api_key
+
+
+def api_key_load_user_from_request(request):
+ api_key = get_api_key_from_request(request)
query_id = request.view_args.get('query_id', None)
user = get_user_from_api_key(api_key, query_id)
| {"golden_diff": "diff --git a/redash/authentication.py b/redash/authentication.py\n--- a/redash/authentication.py\n+++ b/redash/authentication.py\n@@ -52,6 +52,7 @@\n \n return None\n \n+\n def get_user_from_api_key(api_key, query_id):\n if not api_key:\n return None\n@@ -67,8 +68,19 @@\n \n return user\n \n-def api_key_load_user_from_request(request):\n+\n+def get_api_key_from_request(request):\n api_key = request.args.get('api_key', None)\n+\n+ if api_key is None and request.headers.get('Authorization'):\n+ auth_header = request.headers.get('Authorization')\n+ api_key = auth_header.replace('Key ', '', 1)\n+\n+ return api_key\n+\n+\n+def api_key_load_user_from_request(request):\n+ api_key = get_api_key_from_request(request)\n query_id = request.view_args.get('query_id', None)\n \n user = get_user_from_api_key(api_key, query_id)\n", "issue": "API keys should be supported in the HTTP headers\nCurrently it seems that all API calls must include the `api_key` in the query string. Ideally the HTTP headers could also be used (e.g. `Authorization: Key XXXX` or `X-Api-Key`) so that Web server logs don't log the API key in the clear.\n\n", "before_files": [{"content": "import hashlib\nimport hmac\nimport time\nimport logging\n\nfrom flask.ext.login import LoginManager\nfrom flask.ext.login import user_logged_in\n\nfrom redash import models, settings, google_oauth, saml_auth\nfrom redash.tasks import record_event\n\nlogin_manager = LoginManager()\nlogger = logging.getLogger('authentication')\n\n\ndef sign(key, path, expires):\n if not key:\n return None\n\n h = hmac.new(str(key), msg=path, digestmod=hashlib.sha1)\n h.update(str(expires))\n\n return h.hexdigest()\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return models.User.get_by_id(user_id)\n\n\ndef hmac_load_user_from_request(request):\n signature = request.args.get('signature')\n expires = float(request.args.get('expires') or 0)\n query_id = request.view_args.get('query_id', None)\n user_id = request.args.get('user_id', None)\n\n # TODO: 3600 should be a setting\n if signature and time.time() < expires <= time.time() + 3600:\n if user_id:\n user = models.User.get_by_id(user_id)\n calculated_signature = sign(user.api_key, request.path, expires)\n\n if user.api_key and signature == calculated_signature:\n return user\n\n if query_id:\n query = models.Query.get(models.Query.id == query_id)\n calculated_signature = sign(query.api_key, request.path, expires)\n\n if query.api_key and signature == calculated_signature:\n return models.ApiUser(query.api_key)\n\n return None\n\ndef get_user_from_api_key(api_key, query_id):\n if not api_key:\n return None\n\n user = None\n try:\n user = models.User.get_by_api_key(api_key)\n except models.User.DoesNotExist:\n if query_id:\n query = models.Query.get_by_id(query_id)\n if query and query.api_key == api_key:\n user = models.ApiUser(api_key)\n\n return user\n\ndef api_key_load_user_from_request(request):\n api_key = request.args.get('api_key', None)\n query_id = request.view_args.get('query_id', None)\n\n user = get_user_from_api_key(api_key, query_id)\n return user\n\n\ndef log_user_logged_in(app, user):\n event = {\n 'user_id': user.id,\n 'action': 'login',\n 'object_type': 'redash',\n 'timestamp': int(time.time()),\n }\n\n record_event.delay(event)\n\n\ndef setup_authentication(app):\n login_manager.init_app(app)\n login_manager.anonymous_user = models.AnonymousUser\n login_manager.login_view = 'login'\n app.secret_key = settings.COOKIE_SECRET\n 
app.register_blueprint(google_oauth.blueprint)\n app.register_blueprint(saml_auth.blueprint)\n\n user_logged_in.connect(log_user_logged_in)\n\n if settings.AUTH_TYPE == 'hmac':\n login_manager.request_loader(hmac_load_user_from_request)\n elif settings.AUTH_TYPE == 'api_key':\n login_manager.request_loader(api_key_load_user_from_request)\n else:\n logger.warning(\"Unknown authentication type ({}). Using default (HMAC).\".format(settings.AUTH_TYPE))\n login_manager.request_loader(hmac_load_user_from_request)\n\n\n", "path": "redash/authentication.py"}], "after_files": [{"content": "import hashlib\nimport hmac\nimport time\nimport logging\n\nfrom flask.ext.login import LoginManager\nfrom flask.ext.login import user_logged_in\n\nfrom redash import models, settings, google_oauth, saml_auth\nfrom redash.tasks import record_event\n\nlogin_manager = LoginManager()\nlogger = logging.getLogger('authentication')\n\n\ndef sign(key, path, expires):\n if not key:\n return None\n\n h = hmac.new(str(key), msg=path, digestmod=hashlib.sha1)\n h.update(str(expires))\n\n return h.hexdigest()\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return models.User.get_by_id(user_id)\n\n\ndef hmac_load_user_from_request(request):\n signature = request.args.get('signature')\n expires = float(request.args.get('expires') or 0)\n query_id = request.view_args.get('query_id', None)\n user_id = request.args.get('user_id', None)\n\n # TODO: 3600 should be a setting\n if signature and time.time() < expires <= time.time() + 3600:\n if user_id:\n user = models.User.get_by_id(user_id)\n calculated_signature = sign(user.api_key, request.path, expires)\n\n if user.api_key and signature == calculated_signature:\n return user\n\n if query_id:\n query = models.Query.get(models.Query.id == query_id)\n calculated_signature = sign(query.api_key, request.path, expires)\n\n if query.api_key and signature == calculated_signature:\n return models.ApiUser(query.api_key)\n\n return None\n\n\ndef get_user_from_api_key(api_key, query_id):\n if not api_key:\n return None\n\n user = None\n try:\n user = models.User.get_by_api_key(api_key)\n except models.User.DoesNotExist:\n if query_id:\n query = models.Query.get_by_id(query_id)\n if query and query.api_key == api_key:\n user = models.ApiUser(api_key)\n\n return user\n\n\ndef get_api_key_from_request(request):\n api_key = request.args.get('api_key', None)\n\n if api_key is None and request.headers.get('Authorization'):\n auth_header = request.headers.get('Authorization')\n api_key = auth_header.replace('Key ', '', 1)\n\n return api_key\n\n\ndef api_key_load_user_from_request(request):\n api_key = get_api_key_from_request(request)\n query_id = request.view_args.get('query_id', None)\n\n user = get_user_from_api_key(api_key, query_id)\n return user\n\n\ndef log_user_logged_in(app, user):\n event = {\n 'user_id': user.id,\n 'action': 'login',\n 'object_type': 'redash',\n 'timestamp': int(time.time()),\n }\n\n record_event.delay(event)\n\n\ndef setup_authentication(app):\n login_manager.init_app(app)\n login_manager.anonymous_user = models.AnonymousUser\n login_manager.login_view = 'login'\n app.secret_key = settings.COOKIE_SECRET\n app.register_blueprint(google_oauth.blueprint)\n app.register_blueprint(saml_auth.blueprint)\n\n user_logged_in.connect(log_user_logged_in)\n\n if settings.AUTH_TYPE == 'hmac':\n login_manager.request_loader(hmac_load_user_from_request)\n elif settings.AUTH_TYPE == 'api_key':\n login_manager.request_loader(api_key_load_user_from_request)\n else:\n 
logger.warning(\"Unknown authentication type ({}). Using default (HMAC).\".format(settings.AUTH_TYPE))\n login_manager.request_loader(hmac_load_user_from_request)\n\n\n", "path": "redash/authentication.py"}]} | 1,253 | 219 |
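From the client side, the redash patch above means both request styles below end up in the same `get_user_from_api_key` path. The host and endpoint path in this sketch are placeholders; only the `Authorization: Key <api_key>` handling comes from the diff:

```python
import requests

API_KEY = "XXXX"                     # placeholder key
BASE = "https://redash.example.com"  # hypothetical host; endpoint below is illustrative

# Query-string style (the key ends up in web server access logs):
requests.get(f"{BASE}/api/queries/1/results.json", params={"api_key": API_KEY})

# Header style enabled by the patch; get_api_key_from_request() strips the
# leading "Key " from the Authorization header:
requests.get(
    f"{BASE}/api/queries/1/results.json",
    headers={"Authorization": f"Key {API_KEY}"},
)
```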
gh_patches_debug_42802 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-1318 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
missing file or path in source: aha_region_de.py
Hi,
I recently installed version 1.42.0 using HACS and can't get it to run.
Changed the address to one of the test addresses, but same issue.
That path '/home/silas/tmp/test.html' looks like a debug file for some server response. But that's not going to work :)
Any ideas?
Thanks for your help!
configuration.yaml
```
waste_collection_schedule:
sources:
- name: aha_region_de
args:
gemeinde: "Hannover"
strasse: "Voltastr. / Vahrenwald"
hnr: "25"
zusatz: ""
```
```
Logger: waste_collection_schedule.source_shell
Source: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136
Integration: waste_collection_schedule (documentation)
First occurred: 20:08:22 (2 occurrences)
Last logged: 20:09:05
fetch failed for source Zweckverband Abfallwirtschaft Region Hannover: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py", line 85, in fetch with open("/home/silas/tmp/test.html", "w") as f: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ FileNotFoundError: [Errno 2] No such file or directory: '/home/silas/tmp/test.html'`
```
--- END ISSUE ---
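As a hedged illustration, the YAML arguments above map onto the `Source` constructor shown in the file below roughly like this (the import path is assumed from the repository layout); with the stray debug file write removed, a failed lookup raises a plain `Exception` instead of the `FileNotFoundError` in the traceback:

```python
# Import path assumed from custom_components/waste_collection_schedule/...
from waste_collection_schedule.source.aha_region_de import Source

source = Source(
    gemeinde="Hannover",
    strasse="Voltastr. / Vahrenwald",
    hnr="25",
    zusatz="",
)
entries = source.fetch()  # list of Collection objects on success
```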
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py`
Content:
```
1 from waste_collection_schedule import Collection # type: ignore[attr-defined]
2 from waste_collection_schedule.service.ICS import ICS
3
4 import requests
5 from bs4 import BeautifulSoup
6
7 TITLE = "Zweckverband Abfallwirtschaft Region Hannover"
8 DESCRIPTION = "Source for Zweckverband Abfallwirtschaft Region Hannover."
9 URL = "https://www.aha-region.de/"
10 TEST_CASES = {
11 "Neustadt a. Rbge., Am Rotdorn / Nöpke, 1 ": {
12 "gemeinde": "Neustadt a. Rbge.",
13 "strasse": "Am Rotdorn / Nöpke",
14 "hnr": 1,
15 },
16 "Isernhagen, Am Lohner Hof / Isernhagen Fb, 10": {
17 "gemeinde": "Isernhagen",
18 "strasse": "Am Lohner Hof / Isernhagen Fb",
19 "hnr": "10",
20 },
21 "Hannover, Voltastr. / Vahrenwald, 25": {
22 "gemeinde": "Hannover",
23 "strasse": "Voltastr. / Vahrenwald",
24 "hnr": "25",
25 },
26 "Hannover, Melanchthonstr., 10A": {
27 "gemeinde": "Hannover",
28 "strasse": "Melanchthonstr.",
29 "hnr": "10",
30 "zusatz": "A",
31 }
32 }
33
34 ICON_MAP = {
35 "Restabfall": "mdi:trash-can",
36 "Glass": "mdi:bottle-soda",
37 "Bioabfall": "mdi:leaf",
38 "Papier": "mdi:package-variant",
39 "Leichtverpackungen": "mdi:recycle",
40 }
41
42 API_URL = "https://www.aha-region.de/abholtermine/abfuhrkalender"
43
44 class Source:
45 def __init__(self, gemeinde: str, strasse: str, hnr: str | int, zusatz: str | int = ""):
46 self._gemeinde: str = gemeinde
47 self._strasse: str = strasse
48 self._hnr: str = str(hnr)
49 self._zusatz: str = str(zusatz)
50 self._ics = ICS()
51
52 def fetch(self):
53 # find strassen_id
54 r = requests.get(API_URL, params={"gemeinde": self._gemeinde, "von": "A", "bis": "["})
55 r.raise_for_status()
56
57 strassen_id = None
58 selects = BeautifulSoup(r.text, "html.parser").find("select", {"id": "strasse"}).find_all("option")
59 for select in selects:
60 if select.text.lower().replace(" ", "") == self._strasse.lower().replace(" ", ""):
61 strassen_id = select["value"]
62 break
63
64 if not strassen_id:
65 raise Exception("Street not found for gemeinde: " + self._gemeinde + " and strasse: " + self._strasse)
66
67 # request overview page
68 args = {
69 "gemeinde": self._gemeinde,
70 "jsaus": "",
71 "strasse": strassen_id,
72 "hausnr": self._hnr,
73 "hausnraddon": self._zusatz,
74 "anzeigen": "Suchen",
75 }
76
77 r = requests.post(API_URL, data=args)
78 r.raise_for_status()
79
80 soup = BeautifulSoup(r.text, "html.parser")
81 # find all ICAL download buttons
82 download_buttons = soup.find_all("button", {"name": "ical_apple"})
83
84 if not download_buttons:
85 with open("/home/silas/tmp/test.html", "w") as f:
86 f.write(r.text)
87 raise Exception("Invalid response from server, check you configuration if it is correct.")
88
89 entries = []
90
91 for button in download_buttons:
92 # get form data and request ICAL file for every waste type
93 args = {}
94 args["ical_apple"] = button["value"]
95 form = button.parent
96 for input in form.find_all("input"):
97 args[input["name"]] = input["value"]
98
99 r = requests.post(API_URL, data=args)
100 r.encoding = "utf-8"
101
102 dates = self._ics.convert(r.text)
103
104 for d in dates:
105 bin_type = d[1].replace("Abfuhr", "").strip()
106 entries.append(Collection(d[0], bin_type, ICON_MAP.get(bin_type)))
107
108 return entries
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py
@@ -1,8 +1,7 @@
-from waste_collection_schedule import Collection # type: ignore[attr-defined]
-from waste_collection_schedule.service.ICS import ICS
-
import requests
from bs4 import BeautifulSoup
+from waste_collection_schedule import Collection # type: ignore[attr-defined]
+from waste_collection_schedule.service.ICS import ICS
TITLE = "Zweckverband Abfallwirtschaft Region Hannover"
DESCRIPTION = "Source for Zweckverband Abfallwirtschaft Region Hannover."
@@ -14,9 +13,9 @@
"hnr": 1,
},
"Isernhagen, Am Lohner Hof / Isernhagen Fb, 10": {
- "gemeinde": "Isernhagen",
- "strasse": "Am Lohner Hof / Isernhagen Fb",
- "hnr": "10",
+ "gemeinde": "Isernhagen",
+ "strasse": "Am Lohner Hof / Isernhagen Fb",
+ "hnr": "10",
},
"Hannover, Voltastr. / Vahrenwald, 25": {
"gemeinde": "Hannover",
@@ -28,7 +27,7 @@
"strasse": "Melanchthonstr.",
"hnr": "10",
"zusatz": "A",
- }
+ },
}
ICON_MAP = {
@@ -41,8 +40,11 @@
API_URL = "https://www.aha-region.de/abholtermine/abfuhrkalender"
+
class Source:
- def __init__(self, gemeinde: str, strasse: str, hnr: str | int, zusatz: str | int = ""):
+ def __init__(
+ self, gemeinde: str, strasse: str, hnr: str | int, zusatz: str | int = ""
+ ):
self._gemeinde: str = gemeinde
self._strasse: str = strasse
self._hnr: str = str(hnr)
@@ -51,18 +53,31 @@
def fetch(self):
# find strassen_id
- r = requests.get(API_URL, params={"gemeinde": self._gemeinde, "von": "A", "bis": "["})
+ r = requests.get(
+ API_URL, params={"gemeinde": self._gemeinde, "von": "A", "bis": "["}
+ )
r.raise_for_status()
strassen_id = None
- selects = BeautifulSoup(r.text, "html.parser").find("select", {"id": "strasse"}).find_all("option")
+ selects = (
+ BeautifulSoup(r.text, "html.parser")
+ .find("select", {"id": "strasse"})
+ .find_all("option")
+ )
for select in selects:
- if select.text.lower().replace(" ", "") == self._strasse.lower().replace(" ", ""):
+ if select.text.lower().replace(" ", "") == self._strasse.lower().replace(
+ " ", ""
+ ):
strassen_id = select["value"]
break
if not strassen_id:
- raise Exception("Street not found for gemeinde: " + self._gemeinde + " and strasse: " + self._strasse)
+ raise Exception(
+ "Street not found for gemeinde: "
+ + self._gemeinde
+ + " and strasse: "
+ + self._strasse
+ )
# request overview page
args = {
@@ -82,9 +97,9 @@
download_buttons = soup.find_all("button", {"name": "ical_apple"})
if not download_buttons:
- with open("/home/silas/tmp/test.html", "w") as f:
- f.write(r.text)
- raise Exception("Invalid response from server, check you configuration if it is correct.")
+ raise Exception(
+ "Invalid response from server, check you configuration if it is correct."
+ )
entries = []
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py\n@@ -1,8 +1,7 @@\n-from waste_collection_schedule import Collection # type: ignore[attr-defined]\n-from waste_collection_schedule.service.ICS import ICS\n-\n import requests\n from bs4 import BeautifulSoup\n+from waste_collection_schedule import Collection # type: ignore[attr-defined]\n+from waste_collection_schedule.service.ICS import ICS\n \n TITLE = \"Zweckverband Abfallwirtschaft Region Hannover\"\n DESCRIPTION = \"Source for Zweckverband Abfallwirtschaft Region Hannover.\"\n@@ -14,9 +13,9 @@\n \"hnr\": 1,\n },\n \"Isernhagen, Am Lohner Hof / Isernhagen Fb, 10\": {\n- \"gemeinde\": \"Isernhagen\",\n- \"strasse\": \"Am Lohner Hof / Isernhagen Fb\",\n- \"hnr\": \"10\",\n+ \"gemeinde\": \"Isernhagen\",\n+ \"strasse\": \"Am Lohner Hof / Isernhagen Fb\",\n+ \"hnr\": \"10\",\n },\n \"Hannover, Voltastr. / Vahrenwald, 25\": {\n \"gemeinde\": \"Hannover\",\n@@ -28,7 +27,7 @@\n \"strasse\": \"Melanchthonstr.\",\n \"hnr\": \"10\",\n \"zusatz\": \"A\",\n- }\n+ },\n }\n \n ICON_MAP = {\n@@ -41,8 +40,11 @@\n \n API_URL = \"https://www.aha-region.de/abholtermine/abfuhrkalender\"\n \n+\n class Source:\n- def __init__(self, gemeinde: str, strasse: str, hnr: str | int, zusatz: str | int = \"\"):\n+ def __init__(\n+ self, gemeinde: str, strasse: str, hnr: str | int, zusatz: str | int = \"\"\n+ ):\n self._gemeinde: str = gemeinde\n self._strasse: str = strasse\n self._hnr: str = str(hnr)\n@@ -51,18 +53,31 @@\n \n def fetch(self):\n # find strassen_id\n- r = requests.get(API_URL, params={\"gemeinde\": self._gemeinde, \"von\": \"A\", \"bis\": \"[\"})\n+ r = requests.get(\n+ API_URL, params={\"gemeinde\": self._gemeinde, \"von\": \"A\", \"bis\": \"[\"}\n+ )\n r.raise_for_status()\n \n strassen_id = None\n- selects = BeautifulSoup(r.text, \"html.parser\").find(\"select\", {\"id\": \"strasse\"}).find_all(\"option\")\n+ selects = (\n+ BeautifulSoup(r.text, \"html.parser\")\n+ .find(\"select\", {\"id\": \"strasse\"})\n+ .find_all(\"option\")\n+ )\n for select in selects:\n- if select.text.lower().replace(\" \", \"\") == self._strasse.lower().replace(\" \", \"\"):\n+ if select.text.lower().replace(\" \", \"\") == self._strasse.lower().replace(\n+ \" \", \"\"\n+ ):\n strassen_id = select[\"value\"]\n break\n \n if not strassen_id:\n- raise Exception(\"Street not found for gemeinde: \" + self._gemeinde + \" and strasse: \" + self._strasse)\n+ raise Exception(\n+ \"Street not found for gemeinde: \"\n+ + self._gemeinde\n+ + \" and strasse: \"\n+ + self._strasse\n+ )\n \n # request overview page\n args = {\n@@ -82,9 +97,9 @@\n download_buttons = soup.find_all(\"button\", {\"name\": \"ical_apple\"})\n \n if not download_buttons:\n- with open(\"/home/silas/tmp/test.html\", \"w\") as f:\n- f.write(r.text)\n- raise Exception(\"Invalid response from server, check you configuration if it is correct.\")\n+ raise Exception(\n+ \"Invalid response from server, check you configuration if it is correct.\"\n+ )\n \n entries = []\n", "issue": "missing file or path in source: aha_region_de.py\nHi,\r\nI recently installed Version 1.42.0 using HACS and cant get it to run.\r\nChanged the adress to one of the 
test-adresses, but same issue.\r\n\r\nThat home directory '/home/silas/tmp/test.html' seems like debug file for some server-responds. But thats not going to work :)\r\n\r\nAny ideas?\r\n\r\nThanks for your help!\r\n\r\nconfiguration.yaml\r\n```\r\nwaste_collection_schedule:\r\n sources:\r\n - name: aha_region_de\r\n args:\r\n gemeinde: \"Hannover\"\r\n strasse: \"Voltastr. / Vahrenwald\"\r\n hnr: \"25\"\r\n zusatz: \"\"\r\n```\r\n\r\n```\r\nLogger: waste_collection_schedule.source_shell\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py:136\r\nIntegration: waste_collection_schedule (documentation)\r\nFirst occurred: 20:08:22 (2 occurrences)\r\nLast logged: 20:09:05\r\n\r\nfetch failed for source Zweckverband Abfallwirtschaft Region Hannover: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source_shell.py\", line 134, in fetch entries = self._source.fetch() ^^^^^^^^^^^^^^^^^^^^ File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py\", line 85, in fetch with open(\"/home/silas/tmp/test.html\", \"w\") as f: ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ FileNotFoundError: [Errno 2] No such file or directory: '/home/silas/tmp/test.html'`\r\n```\n", "before_files": [{"content": "from waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nTITLE = \"Zweckverband Abfallwirtschaft Region Hannover\"\nDESCRIPTION = \"Source for Zweckverband Abfallwirtschaft Region Hannover.\"\nURL = \"https://www.aha-region.de/\"\nTEST_CASES = {\n \"Neustadt a. Rbge., Am Rotdorn / N\u00f6pke, 1 \": {\n \"gemeinde\": \"Neustadt a. Rbge.\",\n \"strasse\": \"Am Rotdorn / N\u00f6pke\",\n \"hnr\": 1,\n },\n \"Isernhagen, Am Lohner Hof / Isernhagen Fb, 10\": {\n \"gemeinde\": \"Isernhagen\",\n \"strasse\": \"Am Lohner Hof / Isernhagen Fb\",\n \"hnr\": \"10\",\n },\n \"Hannover, Voltastr. / Vahrenwald, 25\": {\n \"gemeinde\": \"Hannover\",\n \"strasse\": \"Voltastr. 
/ Vahrenwald\",\n \"hnr\": \"25\",\n },\n \"Hannover, Melanchthonstr., 10A\": {\n \"gemeinde\": \"Hannover\",\n \"strasse\": \"Melanchthonstr.\",\n \"hnr\": \"10\",\n \"zusatz\": \"A\",\n }\n}\n\nICON_MAP = {\n \"Restabfall\": \"mdi:trash-can\",\n \"Glass\": \"mdi:bottle-soda\",\n \"Bioabfall\": \"mdi:leaf\",\n \"Papier\": \"mdi:package-variant\",\n \"Leichtverpackungen\": \"mdi:recycle\",\n}\n\nAPI_URL = \"https://www.aha-region.de/abholtermine/abfuhrkalender\"\n\nclass Source:\n def __init__(self, gemeinde: str, strasse: str, hnr: str | int, zusatz: str | int = \"\"):\n self._gemeinde: str = gemeinde\n self._strasse: str = strasse\n self._hnr: str = str(hnr)\n self._zusatz: str = str(zusatz)\n self._ics = ICS()\n\n def fetch(self):\n # find strassen_id\n r = requests.get(API_URL, params={\"gemeinde\": self._gemeinde, \"von\": \"A\", \"bis\": \"[\"})\n r.raise_for_status()\n\n strassen_id = None\n selects = BeautifulSoup(r.text, \"html.parser\").find(\"select\", {\"id\": \"strasse\"}).find_all(\"option\")\n for select in selects:\n if select.text.lower().replace(\" \", \"\") == self._strasse.lower().replace(\" \", \"\"):\n strassen_id = select[\"value\"]\n break\n\n if not strassen_id:\n raise Exception(\"Street not found for gemeinde: \" + self._gemeinde + \" and strasse: \" + self._strasse)\n\n # request overview page\n args = {\n \"gemeinde\": self._gemeinde,\n \"jsaus\": \"\",\n \"strasse\": strassen_id,\n \"hausnr\": self._hnr,\n \"hausnraddon\": self._zusatz,\n \"anzeigen\": \"Suchen\",\n }\n\n r = requests.post(API_URL, data=args)\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n # find all ICAL download buttons\n download_buttons = soup.find_all(\"button\", {\"name\": \"ical_apple\"})\n\n if not download_buttons:\n with open(\"/home/silas/tmp/test.html\", \"w\") as f:\n f.write(r.text)\n raise Exception(\"Invalid response from server, check you configuration if it is correct.\")\n\n entries = []\n\n for button in download_buttons:\n # get form data and request ICAL file for every waste type\n args = {}\n args[\"ical_apple\"] = button[\"value\"]\n form = button.parent\n for input in form.find_all(\"input\"):\n args[input[\"name\"]] = input[\"value\"]\n\n r = requests.post(API_URL, data=args)\n r.encoding = \"utf-8\"\n\n dates = self._ics.convert(r.text)\n\n for d in dates:\n bin_type = d[1].replace(\"Abfuhr\", \"\").strip()\n entries.append(Collection(d[0], bin_type, ICON_MAP.get(bin_type)))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py"}], "after_files": [{"content": "import requests\nfrom bs4 import BeautifulSoup\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\n\nTITLE = \"Zweckverband Abfallwirtschaft Region Hannover\"\nDESCRIPTION = \"Source for Zweckverband Abfallwirtschaft Region Hannover.\"\nURL = \"https://www.aha-region.de/\"\nTEST_CASES = {\n \"Neustadt a. Rbge., Am Rotdorn / N\u00f6pke, 1 \": {\n \"gemeinde\": \"Neustadt a. Rbge.\",\n \"strasse\": \"Am Rotdorn / N\u00f6pke\",\n \"hnr\": 1,\n },\n \"Isernhagen, Am Lohner Hof / Isernhagen Fb, 10\": {\n \"gemeinde\": \"Isernhagen\",\n \"strasse\": \"Am Lohner Hof / Isernhagen Fb\",\n \"hnr\": \"10\",\n },\n \"Hannover, Voltastr. / Vahrenwald, 25\": {\n \"gemeinde\": \"Hannover\",\n \"strasse\": \"Voltastr. 
/ Vahrenwald\",\n \"hnr\": \"25\",\n },\n \"Hannover, Melanchthonstr., 10A\": {\n \"gemeinde\": \"Hannover\",\n \"strasse\": \"Melanchthonstr.\",\n \"hnr\": \"10\",\n \"zusatz\": \"A\",\n },\n}\n\nICON_MAP = {\n \"Restabfall\": \"mdi:trash-can\",\n \"Glass\": \"mdi:bottle-soda\",\n \"Bioabfall\": \"mdi:leaf\",\n \"Papier\": \"mdi:package-variant\",\n \"Leichtverpackungen\": \"mdi:recycle\",\n}\n\nAPI_URL = \"https://www.aha-region.de/abholtermine/abfuhrkalender\"\n\n\nclass Source:\n def __init__(\n self, gemeinde: str, strasse: str, hnr: str | int, zusatz: str | int = \"\"\n ):\n self._gemeinde: str = gemeinde\n self._strasse: str = strasse\n self._hnr: str = str(hnr)\n self._zusatz: str = str(zusatz)\n self._ics = ICS()\n\n def fetch(self):\n # find strassen_id\n r = requests.get(\n API_URL, params={\"gemeinde\": self._gemeinde, \"von\": \"A\", \"bis\": \"[\"}\n )\n r.raise_for_status()\n\n strassen_id = None\n selects = (\n BeautifulSoup(r.text, \"html.parser\")\n .find(\"select\", {\"id\": \"strasse\"})\n .find_all(\"option\")\n )\n for select in selects:\n if select.text.lower().replace(\" \", \"\") == self._strasse.lower().replace(\n \" \", \"\"\n ):\n strassen_id = select[\"value\"]\n break\n\n if not strassen_id:\n raise Exception(\n \"Street not found for gemeinde: \"\n + self._gemeinde\n + \" and strasse: \"\n + self._strasse\n )\n\n # request overview page\n args = {\n \"gemeinde\": self._gemeinde,\n \"jsaus\": \"\",\n \"strasse\": strassen_id,\n \"hausnr\": self._hnr,\n \"hausnraddon\": self._zusatz,\n \"anzeigen\": \"Suchen\",\n }\n\n r = requests.post(API_URL, data=args)\n r.raise_for_status()\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n # find all ICAL download buttons\n download_buttons = soup.find_all(\"button\", {\"name\": \"ical_apple\"})\n\n if not download_buttons:\n raise Exception(\n \"Invalid response from server, check you configuration if it is correct.\"\n )\n\n entries = []\n\n for button in download_buttons:\n # get form data and request ICAL file for every waste type\n args = {}\n args[\"ical_apple\"] = button[\"value\"]\n form = button.parent\n for input in form.find_all(\"input\"):\n args[input[\"name\"]] = input[\"value\"]\n\n r = requests.post(API_URL, data=args)\n r.encoding = \"utf-8\"\n\n dates = self._ics.convert(r.text)\n\n for d in dates:\n bin_type = d[1].replace(\"Abfuhr\", \"\").strip()\n entries.append(Collection(d[0], bin_type, ICON_MAP.get(bin_type)))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/aha_region_de.py"}]} | 1,855 | 993 |
gh_patches_debug_30794 | rasdani/github-patches | git_diff | chainer__chainer-6991 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support ChainerX in F.GetItem backward
`GetItemGrad` does not support it yet.
Related: #5944
--- END ISSUE ---
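The direction this points in mirrors what `GetItem.forward` already does: convert any `chainerx.ndarray` entries in the slice tuple back to NumPy/CuPy arrays before they are used for indexing, so the same handling can be applied inside `GetItemGrad`. A minimal sketch of that conversion (the helper name is illustrative, not part of Chainer):
```python
import chainerx
from chainer import backend


def as_fallback_slices(slices):
    # Illustrative helper: ChainerX index arrays are converted with
    # backend.from_chx so that numpy.add.at / cupy scatter_add inside
    # GetItemGrad.forward can consume them.
    return tuple(
        backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s
        for s in slices
    )
```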
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/array/get_item.py`
Content:
```
1 import numpy
2
3 import chainer
4 from chainer import backend
5 from chainer import function_node
6 from chainer import utils
7 from chainer.utils import type_check
8 from chainer import variable
9 import chainerx
10
11
12 _numpy_supports_0d_bool_index = \
13 numpy.lib.NumpyVersion(numpy.__version__) >= '1.13.0'
14
15
16 class GetItem(function_node.FunctionNode):
17
18 """Function that slices array and extract elements."""
19
20 def __init__(self, slices):
21 if isinstance(slices, list):
22 if all([isinstance(s, int) for s in slices]):
23 slices = slices,
24 slices = tuple(slices)
25 elif not isinstance(slices, tuple):
26 slices = slices,
27
28 if chainer.is_debug():
29 n_ellipses = 0
30 for s in slices:
31 if s is Ellipsis:
32 n_ellipses += 1
33 if n_ellipses > 1:
34 raise ValueError('Only one Ellipsis is allowed')
35
36 self.slices = slices
37
38 def check_type_forward(self, in_types):
39 type_check._argname(in_types, ('x',))
40
41 def forward(self, xs):
42 slices = tuple([
43 backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s
44 for s in self.slices])
45 return utils.force_array(xs[0][slices]),
46
47 def backward(self, indexes, gy):
48 return GetItemGrad(
49 self.slices, self.inputs[0].shape).apply(gy)
50
51
52 class GetItemGrad(function_node.FunctionNode):
53
54 def __init__(self, slices, in_shape):
55 self.slices = slices
56 self._in_shape = in_shape
57
58 def forward(self, inputs):
59 gy, = inputs
60 xp = backend.get_array_module(*inputs)
61 gx = xp.zeros(self._in_shape, gy.dtype)
62 if xp is numpy:
63 try:
64 numpy.add.at(gx, self.slices, gy)
65 except IndexError:
66 done = False
67 # In numpy<1.13, 0-dim boolean index is not supported in
68 # numpy.add.at and it's supported for 0-dim arr in
69 # arr.__getitem__.
70 if not _numpy_supports_0d_bool_index and len(self.slices) == 1:
71 idx = numpy.asanyarray(self.slices[0])
72 if idx.dtype == numpy.dtype(bool):
73 # Convert the array and the mask to 1-dim.
74 # numpy.add.at with them is supported in older numpy.
75 numpy.add.at(gx[None], idx[None], gy)
76 done = True
77
78 if not done:
79 msg = '''
80 GetItem does not support backward for this slices. The slices argument is not
81 supported by numpy.add.at, while it is supported by numpy.ndarray.__getitem__.
82
83 Please report this error to the issue tracker with the stack trace,
84 the information of your environment, and your script:
85 https://github.com/chainer/chainer/issues/new.
86 '''
87 raise IndexError(msg)
88 else:
89 gx.scatter_add(self.slices, inputs[0])
90 return gx,
91
92 def backward(self, indexes, ggx):
93 return GetItem(self.slices).apply(ggx)
94
95
96 def get_item(x, slices):
97 """Extract elements from array with specified shape, axes and offsets.
98
99 Args:
100 x (:class:`~chainer.Variable` or :ref:`ndarray`):
101 A variable to be sliced.
102 slices (int, slice, Ellipsis, None, integer array-like, boolean\
103 array-like or tuple of them):
104 An object to specify the selection of elements.
105
106 Returns:
107 A :class:`~chainer.Variable` object which contains sliced array of
108 ``x``.
109
110 .. note::
111
112 It only supports types that are supported by CUDA's atomicAdd when
113 an integer array is included in ``slices``.
114 The supported types are ``numpy.float32``, ``numpy.int32``,
115 ``numpy.uint32``, ``numpy.uint64`` and ``numpy.ulonglong``.
116
117 .. note::
118
119 It does not support ``slices`` that contains multiple boolean arrays.
120
121 .. note::
122
123 See NumPy documentation for details of `indexing
124 <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.
125
126 .. admonition:: Example
127
128 >>> x = np.arange(12).reshape((2, 2, 3))
129 >>> x
130 array([[[ 0, 1, 2],
131 [ 3, 4, 5]],
132 <BLANKLINE>
133 [[ 6, 7, 8],
134 [ 9, 10, 11]]])
135 >>> F.get_item(x, 0)
136 variable([[0, 1, 2],
137 [3, 4, 5]])
138 >>> F.get_item(x, (0, 0, slice(0, 2, 1))) # equals x[0, 0, 0:2:1]
139 variable([0, 1])
140 >>> F.get_item(x, (Ellipsis, 2)) # equals x[..., 2]
141 variable([[ 2, 5],
142 [ 8, 11]])
143 >>> F.get_item(x, (1, np.newaxis, 1, 0)) # equals x[1, None, 1, 0]
144 variable([9])
145
146 """
147 return GetItem(slices).apply((x,))[0]
148
149
150 def install_variable_get_item():
151 variable.Variable.__getitem__ = get_item
152
```
--- END FILES ---
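For context on why the backward pass goes through `numpy.add.at` (or `scatter_add` on GPU) rather than plain fancy-index assignment: gradients for duplicate indices must accumulate. A standalone NumPy illustration, independent of Chainer:
```python
import numpy as np

idx = np.array([0, 0, 2])
gy = np.array([1.0, 2.0, 3.0], dtype=np.float32)

gx = np.zeros(3, dtype=np.float32)
gx[idx] += gy           # buffered update: the duplicate index 0 keeps only one term
print(gx)               # [2. 0. 3.]

gx = np.zeros(3, dtype=np.float32)
np.add.at(gx, idx, gy)  # unbuffered scatter-add: duplicates accumulate
print(gx)               # [3. 0. 3.]
```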
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/array/get_item.py b/chainer/functions/array/get_item.py
--- a/chainer/functions/array/get_item.py
+++ b/chainer/functions/array/get_item.py
@@ -56,19 +56,23 @@
self._in_shape = in_shape
def forward(self, inputs):
+ slices = tuple([
+ backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s
+ for s in self.slices])
+
gy, = inputs
xp = backend.get_array_module(*inputs)
gx = xp.zeros(self._in_shape, gy.dtype)
if xp is numpy:
try:
- numpy.add.at(gx, self.slices, gy)
+ numpy.add.at(gx, slices, gy)
except IndexError:
done = False
# In numpy<1.13, 0-dim boolean index is not supported in
# numpy.add.at and it's supported for 0-dim arr in
# arr.__getitem__.
- if not _numpy_supports_0d_bool_index and len(self.slices) == 1:
- idx = numpy.asanyarray(self.slices[0])
+ if not _numpy_supports_0d_bool_index and len(slices) == 1:
+ idx = numpy.asanyarray(slices[0])
if idx.dtype == numpy.dtype(bool):
# Convert the array and the mask to 1-dim.
# numpy.add.at with them is supported in older numpy.
@@ -86,7 +90,7 @@
'''
raise IndexError(msg)
else:
- gx.scatter_add(self.slices, inputs[0])
+ gx.scatter_add(slices, inputs[0])
return gx,
def backward(self, indexes, ggx):
| {"golden_diff": "diff --git a/chainer/functions/array/get_item.py b/chainer/functions/array/get_item.py\n--- a/chainer/functions/array/get_item.py\n+++ b/chainer/functions/array/get_item.py\n@@ -56,19 +56,23 @@\n self._in_shape = in_shape\n \n def forward(self, inputs):\n+ slices = tuple([\n+ backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s\n+ for s in self.slices])\n+\n gy, = inputs\n xp = backend.get_array_module(*inputs)\n gx = xp.zeros(self._in_shape, gy.dtype)\n if xp is numpy:\n try:\n- numpy.add.at(gx, self.slices, gy)\n+ numpy.add.at(gx, slices, gy)\n except IndexError:\n done = False\n # In numpy<1.13, 0-dim boolean index is not supported in\n # numpy.add.at and it's supported for 0-dim arr in\n # arr.__getitem__.\n- if not _numpy_supports_0d_bool_index and len(self.slices) == 1:\n- idx = numpy.asanyarray(self.slices[0])\n+ if not _numpy_supports_0d_bool_index and len(slices) == 1:\n+ idx = numpy.asanyarray(slices[0])\n if idx.dtype == numpy.dtype(bool):\n # Convert the array and the mask to 1-dim.\n # numpy.add.at with them is supported in older numpy.\n@@ -86,7 +90,7 @@\n '''\n raise IndexError(msg)\n else:\n- gx.scatter_add(self.slices, inputs[0])\n+ gx.scatter_add(slices, inputs[0])\n return gx,\n \n def backward(self, indexes, ggx):\n", "issue": "Support ChainerX in F.GetItem backward\n`GetItemGrad` does not suport it yet.\r\n\r\nRelated: #5944\n", "before_files": [{"content": "import numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer import function_node\nfrom chainer import utils\nfrom chainer.utils import type_check\nfrom chainer import variable\nimport chainerx\n\n\n_numpy_supports_0d_bool_index = \\\n numpy.lib.NumpyVersion(numpy.__version__) >= '1.13.0'\n\n\nclass GetItem(function_node.FunctionNode):\n\n \"\"\"Function that slices array and extract elements.\"\"\"\n\n def __init__(self, slices):\n if isinstance(slices, list):\n if all([isinstance(s, int) for s in slices]):\n slices = slices,\n slices = tuple(slices)\n elif not isinstance(slices, tuple):\n slices = slices,\n\n if chainer.is_debug():\n n_ellipses = 0\n for s in slices:\n if s is Ellipsis:\n n_ellipses += 1\n if n_ellipses > 1:\n raise ValueError('Only one Ellipsis is allowed')\n\n self.slices = slices\n\n def check_type_forward(self, in_types):\n type_check._argname(in_types, ('x',))\n\n def forward(self, xs):\n slices = tuple([\n backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s\n for s in self.slices])\n return utils.force_array(xs[0][slices]),\n\n def backward(self, indexes, gy):\n return GetItemGrad(\n self.slices, self.inputs[0].shape).apply(gy)\n\n\nclass GetItemGrad(function_node.FunctionNode):\n\n def __init__(self, slices, in_shape):\n self.slices = slices\n self._in_shape = in_shape\n\n def forward(self, inputs):\n gy, = inputs\n xp = backend.get_array_module(*inputs)\n gx = xp.zeros(self._in_shape, gy.dtype)\n if xp is numpy:\n try:\n numpy.add.at(gx, self.slices, gy)\n except IndexError:\n done = False\n # In numpy<1.13, 0-dim boolean index is not supported in\n # numpy.add.at and it's supported for 0-dim arr in\n # arr.__getitem__.\n if not _numpy_supports_0d_bool_index and len(self.slices) == 1:\n idx = numpy.asanyarray(self.slices[0])\n if idx.dtype == numpy.dtype(bool):\n # Convert the array and the mask to 1-dim.\n # numpy.add.at with them is supported in older numpy.\n numpy.add.at(gx[None], idx[None], gy)\n done = True\n\n if not done:\n msg = '''\nGetItem does not support backward for this slices. 
The slices argument is not\nsupported by numpy.add.at, while it is supported by numpy.ndarray.__getitem__.\n\nPlease report this error to the issue tracker with the stack trace,\nthe information of your environment, and your script:\nhttps://github.com/chainer/chainer/issues/new.\n'''\n raise IndexError(msg)\n else:\n gx.scatter_add(self.slices, inputs[0])\n return gx,\n\n def backward(self, indexes, ggx):\n return GetItem(self.slices).apply(ggx)\n\n\ndef get_item(x, slices):\n \"\"\"Extract elements from array with specified shape, axes and offsets.\n\n Args:\n x (:class:`~chainer.Variable` or :ref:`ndarray`):\n A variable to be sliced.\n slices (int, slice, Ellipsis, None, integer array-like, boolean\\\n array-like or tuple of them):\n An object to specify the selection of elements.\n\n Returns:\n A :class:`~chainer.Variable` object which contains sliced array of\n ``x``.\n\n .. note::\n\n It only supports types that are supported by CUDA's atomicAdd when\n an integer array is included in ``slices``.\n The supported types are ``numpy.float32``, ``numpy.int32``,\n ``numpy.uint32``, ``numpy.uint64`` and ``numpy.ulonglong``.\n\n .. note::\n\n It does not support ``slices`` that contains multiple boolean arrays.\n\n .. note::\n\n See NumPy documentation for details of `indexing\n <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.\n\n .. admonition:: Example\n\n >>> x = np.arange(12).reshape((2, 2, 3))\n >>> x\n array([[[ 0, 1, 2],\n [ 3, 4, 5]],\n <BLANKLINE>\n [[ 6, 7, 8],\n [ 9, 10, 11]]])\n >>> F.get_item(x, 0)\n variable([[0, 1, 2],\n [3, 4, 5]])\n >>> F.get_item(x, (0, 0, slice(0, 2, 1))) # equals x[0, 0, 0:2:1]\n variable([0, 1])\n >>> F.get_item(x, (Ellipsis, 2)) # equals x[..., 2]\n variable([[ 2, 5],\n [ 8, 11]])\n >>> F.get_item(x, (1, np.newaxis, 1, 0)) # equals x[1, None, 1, 0]\n variable([9])\n\n \"\"\"\n return GetItem(slices).apply((x,))[0]\n\n\ndef install_variable_get_item():\n variable.Variable.__getitem__ = get_item\n", "path": "chainer/functions/array/get_item.py"}], "after_files": [{"content": "import numpy\n\nimport chainer\nfrom chainer import backend\nfrom chainer import function_node\nfrom chainer import utils\nfrom chainer.utils import type_check\nfrom chainer import variable\nimport chainerx\n\n\n_numpy_supports_0d_bool_index = \\\n numpy.lib.NumpyVersion(numpy.__version__) >= '1.13.0'\n\n\nclass GetItem(function_node.FunctionNode):\n\n \"\"\"Function that slices array and extract elements.\"\"\"\n\n def __init__(self, slices):\n if isinstance(slices, list):\n if all([isinstance(s, int) for s in slices]):\n slices = slices,\n slices = tuple(slices)\n elif not isinstance(slices, tuple):\n slices = slices,\n\n if chainer.is_debug():\n n_ellipses = 0\n for s in slices:\n if s is Ellipsis:\n n_ellipses += 1\n if n_ellipses > 1:\n raise ValueError('Only one Ellipsis is allowed')\n\n self.slices = slices\n\n def check_type_forward(self, in_types):\n type_check._argname(in_types, ('x',))\n\n def forward(self, xs):\n slices = tuple([\n backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s\n for s in self.slices])\n return utils.force_array(xs[0][slices]),\n\n def backward(self, indexes, gy):\n return GetItemGrad(\n self.slices, self.inputs[0].shape).apply(gy)\n\n\nclass GetItemGrad(function_node.FunctionNode):\n\n def __init__(self, slices, in_shape):\n self.slices = slices\n self._in_shape = in_shape\n\n def forward(self, inputs):\n slices = tuple([\n backend.from_chx(s) if isinstance(s, chainerx.ndarray) else s\n for s in self.slices])\n\n gy, = 
inputs\n xp = backend.get_array_module(*inputs)\n gx = xp.zeros(self._in_shape, gy.dtype)\n if xp is numpy:\n try:\n numpy.add.at(gx, slices, gy)\n except IndexError:\n done = False\n # In numpy<1.13, 0-dim boolean index is not supported in\n # numpy.add.at and it's supported for 0-dim arr in\n # arr.__getitem__.\n if not _numpy_supports_0d_bool_index and len(slices) == 1:\n idx = numpy.asanyarray(slices[0])\n if idx.dtype == numpy.dtype(bool):\n # Convert the array and the mask to 1-dim.\n # numpy.add.at with them is supported in older numpy.\n numpy.add.at(gx[None], idx[None], gy)\n done = True\n\n if not done:\n msg = '''\nGetItem does not support backward for this slices. The slices argument is not\nsupported by numpy.add.at, while it is supported by numpy.ndarray.__getitem__.\n\nPlease report this error to the issue tracker with the stack trace,\nthe information of your environment, and your script:\nhttps://github.com/chainer/chainer/issues/new.\n'''\n raise IndexError(msg)\n else:\n gx.scatter_add(slices, inputs[0])\n return gx,\n\n def backward(self, indexes, ggx):\n return GetItem(self.slices).apply(ggx)\n\n\ndef get_item(x, slices):\n \"\"\"Extract elements from array with specified shape, axes and offsets.\n\n Args:\n x (:class:`~chainer.Variable` or :ref:`ndarray`):\n A variable to be sliced.\n slices (int, slice, Ellipsis, None, integer array-like, boolean\\\n array-like or tuple of them):\n An object to specify the selection of elements.\n\n Returns:\n A :class:`~chainer.Variable` object which contains sliced array of\n ``x``.\n\n .. note::\n\n It only supports types that are supported by CUDA's atomicAdd when\n an integer array is included in ``slices``.\n The supported types are ``numpy.float32``, ``numpy.int32``,\n ``numpy.uint32``, ``numpy.uint64`` and ``numpy.ulonglong``.\n\n .. note::\n\n It does not support ``slices`` that contains multiple boolean arrays.\n\n .. note::\n\n See NumPy documentation for details of `indexing\n <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_.\n\n .. admonition:: Example\n\n >>> x = np.arange(12).reshape((2, 2, 3))\n >>> x\n array([[[ 0, 1, 2],\n [ 3, 4, 5]],\n <BLANKLINE>\n [[ 6, 7, 8],\n [ 9, 10, 11]]])\n >>> F.get_item(x, 0)\n variable([[0, 1, 2],\n [3, 4, 5]])\n >>> F.get_item(x, (0, 0, slice(0, 2, 1))) # equals x[0, 0, 0:2:1]\n variable([0, 1])\n >>> F.get_item(x, (Ellipsis, 2)) # equals x[..., 2]\n variable([[ 2, 5],\n [ 8, 11]])\n >>> F.get_item(x, (1, np.newaxis, 1, 0)) # equals x[1, None, 1, 0]\n variable([9])\n\n \"\"\"\n return GetItem(slices).apply((x,))[0]\n\n\ndef install_variable_get_item():\n variable.Variable.__getitem__ = get_item\n", "path": "chainer/functions/array/get_item.py"}]} | 1,876 | 393 |
gh_patches_debug_2814 | rasdani/github-patches | git_diff | dotkom__onlineweb4-496 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make offline archive look more like event archive
Same as #481. This is mainly about the filtering section.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/api/v0/article.py`
Content:
```
1 #-*- coding: utf-8 -*-
2 from copy import copy
3
4 from django.conf import settings
5 from django.template.defaultfilters import slugify
6 from django.utils import timezone
7
8 from filebrowser.base import FileObject
9 from filebrowser.settings import VERSIONS
10 from tastypie import fields
11 from tastypie.resources import ModelResource
12
13 from apps.api.v0.authentication import UserResource
14 from apps.article.models import Article, ArticleTag, Tag
15
16
17
18
19 class ArticleResource(ModelResource):
20 author = fields.ToOneField(UserResource, 'created_by')
21
22 def alter_list_data_to_serialize(self, request, data):
23 # Renames list data 'object' to 'articles'.
24 if isinstance(data, dict):
25 data['articles'] = copy(data['objects'])
26 del(data['objects'])
27 return data
28
29 # Making multiple images for the article
30 def dehydrate(self, bundle):
31
32 # Setting slug-field
33 bundle.data['slug'] = slugify(bundle.data['heading'])
34
35 # If image is set
36 if bundle.data['image']:
37 # Parse to FileObject used by Filebrowser
38 temp_image = FileObject(bundle.data['image'])
39
40 # Itterate the different versions (by key)
41 for ver in VERSIONS.keys():
42 # Check if the key start with article_ (if it does, we want to crop to that size)
43 if ver.startswith('article_'):
44 # Adding the new image to the object
45 bundle.data['image_'+ver] = temp_image.version_generate(ver).url
46
47 # Unset the image-field
48 del(bundle.data['image'])
49
50 # Returning washed object
51 return bundle
52
53 def get_object_list(self, request):
54 # Getting the GET-params
55 if 'tag' in request.GET:
56 request_tag = request.GET['tag']
57 else:
58 request_tag = None
59
60 if 'year' in request.GET:
61 request_year = request.GET['year']
62 else:
63 request_year = None
64
65 if 'month' in request.GET:
66 request_month = request.GET['month']
67 else:
68 request_month = None
69
70 # Check filtering here
71 if (request_year is not None):
72 if (request_month is not None):
73 # Filtering on both year and month
74 queryset = Article.objects.filter(published_date__year=request_year, published_date__month=request_month, published_date__lte=timezone.now()).order_by('-published_date')
75 else:
76 # Filtering on only year
77 queryset = Article.objects.filter(published_date__year=request_year, published_date__lte=timezone.now()).order_by('-published_date')
78 else:
79 # Not filtering on year, check if filtering on slug (tag) or return default query
80 if (request_tag is not None):
81 # Filtering on slug
82 slug_query = Tag.objects.filter(slug = request_tag)
83 slug_connect = ArticleTag.objects.filter(tag = slug_query).values('article_id')
84 queryset = Article.objects.filter(id__in = slug_connect, published_date__lte=timezone.now()).order_by('-published_date')
85 else:
86 # No filtering at all, return default query
87 queryset = Article.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
88 return queryset
89
90 class Meta:
91 API_LIMIT_PER_PAGE = 9
92 queryset = Article.objects.filter(published_date__lte=timezone.now())
93 resource_name = 'article/all'
94 ordering = ['-published_date']
95 include_absolute_url = True
96 filtering = {
97 'featured' : ('exact',),
98 'published_date' : ('gte',),
99 }
100
101 class ArticleLatestResource(ModelResource):
102 author = fields.ToOneField(UserResource, 'created_by')
103
104 class Meta:
105 queryset = Article.objects.filter(published_date__lte=timezone.now())
106
107 resource_name = 'article/latest'
108 filtering = {
109 'featured': ('exact',)
110 }
111 ordering = ['-published_date']
112 max_limit = 25
113 def alter_list_data_to_serialize(self, request, data):
114 # Renames list data 'object' to 'articles'.
115 if isinstance(data, dict):
116 data['articles'] = copy(data['objects'])
117 del(data['objects'])
118 return data
119 def dehydrate(self, bundle):
120 bundle.data['slug'] = slugify(bundle.data['heading'])
121 return bundle
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/api/v0/article.py b/apps/api/v0/article.py
--- a/apps/api/v0/article.py
+++ b/apps/api/v0/article.py
@@ -17,7 +17,7 @@
class ArticleResource(ModelResource):
- author = fields.ToOneField(UserResource, 'created_by')
+ author = fields.ToOneField(UserResource, 'created_by', full=True)
def alter_list_data_to_serialize(self, request, data):
# Renames list data 'object' to 'articles'.
| {"golden_diff": "diff --git a/apps/api/v0/article.py b/apps/api/v0/article.py\n--- a/apps/api/v0/article.py\n+++ b/apps/api/v0/article.py\n@@ -17,7 +17,7 @@\n \n \n class ArticleResource(ModelResource):\n- author = fields.ToOneField(UserResource, 'created_by')\n+ author = fields.ToOneField(UserResource, 'created_by', full=True)\n \n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n", "issue": "Make offline archive look more like event archive\nSame as #481. This is mainly about the filtering section.\n\n", "before_files": [{"content": "#-*- coding: utf-8 -*-\nfrom copy import copy\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import slugify\nfrom django.utils import timezone\n\nfrom filebrowser.base import FileObject\nfrom filebrowser.settings import VERSIONS\nfrom tastypie import fields\nfrom tastypie.resources import ModelResource\n\nfrom apps.api.v0.authentication import UserResource\nfrom apps.article.models import Article, ArticleTag, Tag\n\n\n\n\nclass ArticleResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by')\n \n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict):\n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n \n # Making multiple images for the article\n def dehydrate(self, bundle):\n \n # Setting slug-field\n bundle.data['slug'] = slugify(bundle.data['heading'])\n \n # If image is set\n if bundle.data['image']:\n # Parse to FileObject used by Filebrowser\n temp_image = FileObject(bundle.data['image'])\n \n # Itterate the different versions (by key)\n for ver in VERSIONS.keys():\n # Check if the key start with article_ (if it does, we want to crop to that size)\n if ver.startswith('article_'):\n # Adding the new image to the object\n bundle.data['image_'+ver] = temp_image.version_generate(ver).url\n \n # Unset the image-field\n del(bundle.data['image'])\n \n # Returning washed object\n return bundle\n \n def get_object_list(self, request):\n # Getting the GET-params\n if 'tag' in request.GET:\n request_tag = request.GET['tag']\n else:\n request_tag = None\n \n if 'year' in request.GET:\n request_year = request.GET['year']\n else:\n request_year = None\n \n if 'month' in request.GET:\n request_month = request.GET['month']\n else:\n request_month = None\n \n # Check filtering here\n if (request_year is not None):\n if (request_month is not None):\n # Filtering on both year and month\n queryset = Article.objects.filter(published_date__year=request_year, published_date__month=request_month, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Filtering on only year\n queryset = Article.objects.filter(published_date__year=request_year, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Not filtering on year, check if filtering on slug (tag) or return default query\n if (request_tag is not None):\n # Filtering on slug\n slug_query = Tag.objects.filter(slug = request_tag)\n slug_connect = ArticleTag.objects.filter(tag = slug_query).values('article_id')\n queryset = Article.objects.filter(id__in = slug_connect, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # No filtering at all, return default query\n queryset = Article.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n return queryset\n \n class Meta: \n API_LIMIT_PER_PAGE = 9\n queryset = 
Article.objects.filter(published_date__lte=timezone.now())\n resource_name = 'article/all'\n ordering = ['-published_date']\n include_absolute_url = True\n filtering = {\n 'featured' : ('exact',),\n 'published_date' : ('gte',),\n }\n\nclass ArticleLatestResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by')\n \n class Meta:\n queryset = Article.objects.filter(published_date__lte=timezone.now())\n \n resource_name = 'article/latest'\n filtering = {\n 'featured': ('exact',)\n }\n ordering = ['-published_date']\n max_limit = 25\n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict): \n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n def dehydrate(self, bundle):\n bundle.data['slug'] = slugify(bundle.data['heading'])\n return bundle\n", "path": "apps/api/v0/article.py"}], "after_files": [{"content": "#-*- coding: utf-8 -*-\nfrom copy import copy\n\nfrom django.conf import settings\nfrom django.template.defaultfilters import slugify\nfrom django.utils import timezone\n\nfrom filebrowser.base import FileObject\nfrom filebrowser.settings import VERSIONS\nfrom tastypie import fields\nfrom tastypie.resources import ModelResource\n\nfrom apps.api.v0.authentication import UserResource\nfrom apps.article.models import Article, ArticleTag, Tag\n\n\n\n\nclass ArticleResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by', full=True)\n \n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict):\n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n \n # Making multiple images for the article\n def dehydrate(self, bundle):\n \n # Setting slug-field\n bundle.data['slug'] = slugify(bundle.data['heading'])\n \n # If image is set\n if bundle.data['image']:\n # Parse to FileObject used by Filebrowser\n temp_image = FileObject(bundle.data['image'])\n \n # Itterate the different versions (by key)\n for ver in VERSIONS.keys():\n # Check if the key start with article_ (if it does, we want to crop to that size)\n if ver.startswith('article_'):\n # Adding the new image to the object\n bundle.data['image_'+ver] = temp_image.version_generate(ver).url\n \n # Unset the image-field\n del(bundle.data['image'])\n \n # Returning washed object\n return bundle\n \n def get_object_list(self, request):\n # Getting the GET-params\n if 'tag' in request.GET:\n request_tag = request.GET['tag']\n else:\n request_tag = None\n \n if 'year' in request.GET:\n request_year = request.GET['year']\n else:\n request_year = None\n \n if 'month' in request.GET:\n request_month = request.GET['month']\n else:\n request_month = None\n \n # Check filtering here\n if (request_year is not None):\n if (request_month is not None):\n # Filtering on both year and month\n queryset = Article.objects.filter(published_date__year=request_year, published_date__month=request_month, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Filtering on only year\n queryset = Article.objects.filter(published_date__year=request_year, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # Not filtering on year, check if filtering on slug (tag) or return default query\n if (request_tag is not None):\n # Filtering on slug\n slug_query = Tag.objects.filter(slug = request_tag)\n slug_connect = ArticleTag.objects.filter(tag = slug_query).values('article_id')\n queryset = 
Article.objects.filter(id__in = slug_connect, published_date__lte=timezone.now()).order_by('-published_date')\n else:\n # No filtering at all, return default query\n queryset = Article.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n return queryset\n \n class Meta: \n API_LIMIT_PER_PAGE = 9\n queryset = Article.objects.filter(published_date__lte=timezone.now())\n resource_name = 'article/all'\n ordering = ['-published_date']\n include_absolute_url = True\n filtering = {\n 'featured' : ('exact',),\n 'published_date' : ('gte',),\n }\n\nclass ArticleLatestResource(ModelResource):\n author = fields.ToOneField(UserResource, 'created_by')\n \n class Meta:\n queryset = Article.objects.filter(published_date__lte=timezone.now())\n \n resource_name = 'article/latest'\n filtering = {\n 'featured': ('exact',)\n }\n ordering = ['-published_date']\n max_limit = 25\n def alter_list_data_to_serialize(self, request, data):\n # Renames list data 'object' to 'articles'.\n if isinstance(data, dict): \n data['articles'] = copy(data['objects'])\n del(data['objects'])\n return data\n def dehydrate(self, bundle):\n bundle.data['slug'] = slugify(bundle.data['heading'])\n return bundle\n", "path": "apps/api/v0/article.py"}]} | 1,473 | 115 |
gh_patches_debug_14108 | rasdani/github-patches | git_diff | wright-group__WrightTools-726 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Group is not defined in collection
https://github.com/wright-group/WrightTools/blob/ca056aa600f341501a99d2ea4d11f7d74047bc26/WrightTools/_open.py#L48
This statement will cause an AttributeError. It is not currently covered by tests.
--- END ISSUE ---
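Because the `collection` subpackage never defines a `Group` class, the fallback branch has to import `Group` from wherever the generic group type actually lives. A minimal sketch, assuming it is provided by `WrightTools._group` (the module path here is an assumption for illustration):
```python
from WrightTools import _group as wt_group  # assumed location of the generic Group class


def open_fallback(filepath, name, edit_local=False):
    # Sketch only: return the generic Group when the root object is neither
    # Data nor Collection, instead of the undefined wt_collection.Group.
    return wt_group.Group(filepath=filepath, name=name, edit_local=edit_local)
```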
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/_open.py`
Content:
```
1 """Generic open method for wt5 files."""
2
3
4 # --- import -------------------------------------------------------------------------------------
5
6
7 import posixpath
8
9 import h5py
10
11 from . import collection as wt_collection
12 from . import data as wt_data
13
14
15 # --- define -------------------------------------------------------------------------------------
16
17
18 __all__ = ["open"]
19
20
21 # --- functions ----------------------------------------------------------------------------------
22
23
24 def open(filepath, edit_local=False):
25 """Open any wt5 file, returning the top-level object (data or collection).
26
27 Parameters
28 ----------
29 filepath : string
30 Path to file.
31 edit_local : boolean (optional)
32 If True, the file itself will be opened for editing. Otherwise, a
33 copy will be created. Default is False.
34
35 Returns
36 -------
37 WrightTools Collection or Data
38 Root-level object in file.
39 """
40 f = h5py.File(filepath)
41 class_name = f[posixpath.sep].attrs["class"]
42 name = f[posixpath.sep].attrs["name"]
43 if class_name == "Data":
44 return wt_data.Data(filepath=filepath, name=name, edit_local=edit_local)
45 elif class_name == "Collection":
46 return wt_collection.Collection(filepath=filepath, name=name, edit_local=edit_local)
47 else:
48 return wt_collection.Group(filepath=filepath, name=name, edit_local=edit_local)
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/_open.py b/WrightTools/_open.py
--- a/WrightTools/_open.py
+++ b/WrightTools/_open.py
@@ -10,6 +10,7 @@
from . import collection as wt_collection
from . import data as wt_data
+from . import _group as wt_group
# --- define -------------------------------------------------------------------------------------
@@ -45,4 +46,4 @@
elif class_name == "Collection":
return wt_collection.Collection(filepath=filepath, name=name, edit_local=edit_local)
else:
- return wt_collection.Group(filepath=filepath, name=name, edit_local=edit_local)
+ return wt_group.Group(filepath=filepath, name=name, edit_local=edit_local)
| {"golden_diff": "diff --git a/WrightTools/_open.py b/WrightTools/_open.py\n--- a/WrightTools/_open.py\n+++ b/WrightTools/_open.py\n@@ -10,6 +10,7 @@\n \n from . import collection as wt_collection\n from . import data as wt_data\n+from . import _group as wt_group\n \n \n # --- define -------------------------------------------------------------------------------------\n@@ -45,4 +46,4 @@\n elif class_name == \"Collection\":\n return wt_collection.Collection(filepath=filepath, name=name, edit_local=edit_local)\n else:\n- return wt_collection.Group(filepath=filepath, name=name, edit_local=edit_local)\n+ return wt_group.Group(filepath=filepath, name=name, edit_local=edit_local)\n", "issue": "Group is not defined in collection\nhttps://github.com/wright-group/WrightTools/blob/ca056aa600f341501a99d2ea4d11f7d74047bc26/WrightTools/_open.py#L48\r\n\r\nStatement will cause an attribute error. Not tested currently\n", "before_files": [{"content": "\"\"\"Generic open method for wt5 files.\"\"\"\n\n\n# --- import -------------------------------------------------------------------------------------\n\n\nimport posixpath\n\nimport h5py\n\nfrom . import collection as wt_collection\nfrom . import data as wt_data\n\n\n# --- define -------------------------------------------------------------------------------------\n\n\n__all__ = [\"open\"]\n\n\n# --- functions ----------------------------------------------------------------------------------\n\n\ndef open(filepath, edit_local=False):\n \"\"\"Open any wt5 file, returning the top-level object (data or collection).\n\n Parameters\n ----------\n filepath : string\n Path to file.\n edit_local : boolean (optional)\n If True, the file itself will be opened for editing. Otherwise, a\n copy will be created. Default is False.\n\n Returns\n -------\n WrightTools Collection or Data\n Root-level object in file.\n \"\"\"\n f = h5py.File(filepath)\n class_name = f[posixpath.sep].attrs[\"class\"]\n name = f[posixpath.sep].attrs[\"name\"]\n if class_name == \"Data\":\n return wt_data.Data(filepath=filepath, name=name, edit_local=edit_local)\n elif class_name == \"Collection\":\n return wt_collection.Collection(filepath=filepath, name=name, edit_local=edit_local)\n else:\n return wt_collection.Group(filepath=filepath, name=name, edit_local=edit_local)\n", "path": "WrightTools/_open.py"}], "after_files": [{"content": "\"\"\"Generic open method for wt5 files.\"\"\"\n\n\n# --- import -------------------------------------------------------------------------------------\n\n\nimport posixpath\n\nimport h5py\n\nfrom . import collection as wt_collection\nfrom . import data as wt_data\nfrom . import _group as wt_group\n\n\n# --- define -------------------------------------------------------------------------------------\n\n\n__all__ = [\"open\"]\n\n\n# --- functions ----------------------------------------------------------------------------------\n\n\ndef open(filepath, edit_local=False):\n \"\"\"Open any wt5 file, returning the top-level object (data or collection).\n\n Parameters\n ----------\n filepath : string\n Path to file.\n edit_local : boolean (optional)\n If True, the file itself will be opened for editing. Otherwise, a\n copy will be created. 
Default is False.\n\n Returns\n -------\n WrightTools Collection or Data\n Root-level object in file.\n \"\"\"\n f = h5py.File(filepath)\n class_name = f[posixpath.sep].attrs[\"class\"]\n name = f[posixpath.sep].attrs[\"name\"]\n if class_name == \"Data\":\n return wt_data.Data(filepath=filepath, name=name, edit_local=edit_local)\n elif class_name == \"Collection\":\n return wt_collection.Collection(filepath=filepath, name=name, edit_local=edit_local)\n else:\n return wt_group.Group(filepath=filepath, name=name, edit_local=edit_local)\n", "path": "WrightTools/_open.py"}]} | 703 | 160 |
gh_patches_debug_14577 | rasdani/github-patches | git_diff | urllib3__urllib3-2289 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecate NTLMConnectionPool in 1.26.x
As was mentioned in https://github.com/urllib3/urllib3/pull/2278#issuecomment-864414599 and https://github.com/urllib3/urllib3/pull/2278#issuecomment-864450016 we're moving to remove `NTLMConnectionPool` and the `urllib3.contrib.ntlmpool` module from urllib3 in v2.0 if we don't find a new maintainer for the module (perhaps as a third-party package, i.e. `urllib3-ntlmpool`?)
- The module is not covered by our test suite.
- It is not even clear which PyPI package is needed for it.
- It has fallen into disrepair (e.g. timeout/ssl/other options not being respected).
- According to Wikipedia, "Since 2010, Microsoft no longer recommends NTLM in applications"
- Seems like it's not used often, if at all.
In the `1.26.x` branch we should unconditionally raise a `DeprecationWarning` when the module is imported. The warning should link to this issue with a call to action: anyone who uses the module should comment on the issue. This should help us better discover who (if any) our users are here so we can better make a decision.
--- END ISSUE ---
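An import-time deprecation warning of the kind described above can be as small as the sketch below; the exact wording and issue link are illustrative rather than the project's final text:
```python
# Near the top of urllib3/contrib/ntlmpool.py (sketch).
import warnings

warnings.warn(
    "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed "
    "in urllib3 v2.0. If you are a user of this module, please comment on "
    "https://github.com/urllib3/urllib3/issues/2282.",
    DeprecationWarning,
)
```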
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/contrib/ntlmpool.py`
Content:
```
1 """
2 NTLM authenticating pool, contributed by erikcederstran
3
4 Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
5 """
6 from __future__ import absolute_import
7
8 from logging import getLogger
9
10 from ntlm import ntlm
11
12 from .. import HTTPSConnectionPool
13 from ..packages.six.moves.http_client import HTTPSConnection
14
15 log = getLogger(__name__)
16
17
18 class NTLMConnectionPool(HTTPSConnectionPool):
19 """
20 Implements an NTLM authentication version of an urllib3 connection pool
21 """
22
23 scheme = "https"
24
25 def __init__(self, user, pw, authurl, *args, **kwargs):
26 """
27 authurl is a random URL on the server that is protected by NTLM.
28 user is the Windows user, probably in the DOMAIN\\username format.
29 pw is the password for the user.
30 """
31 super(NTLMConnectionPool, self).__init__(*args, **kwargs)
32 self.authurl = authurl
33 self.rawuser = user
34 user_parts = user.split("\\", 1)
35 self.domain = user_parts[0].upper()
36 self.user = user_parts[1]
37 self.pw = pw
38
39 def _new_conn(self):
40 # Performs the NTLM handshake that secures the connection. The socket
41 # must be kept open while requests are performed.
42 self.num_connections += 1
43 log.debug(
44 "Starting NTLM HTTPS connection no. %d: https://%s%s",
45 self.num_connections,
46 self.host,
47 self.authurl,
48 )
49
50 headers = {"Connection": "Keep-Alive"}
51 req_header = "Authorization"
52 resp_header = "www-authenticate"
53
54 conn = HTTPSConnection(host=self.host, port=self.port)
55
56 # Send negotiation message
57 headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
58 self.rawuser
59 )
60 log.debug("Request headers: %s", headers)
61 conn.request("GET", self.authurl, None, headers)
62 res = conn.getresponse()
63 reshdr = dict(res.getheaders())
64 log.debug("Response status: %s %s", res.status, res.reason)
65 log.debug("Response headers: %s", reshdr)
66 log.debug("Response data: %s [...]", res.read(100))
67
68 # Remove the reference to the socket, so that it can not be closed by
69 # the response object (we want to keep the socket open)
70 res.fp = None
71
72 # Server should respond with a challenge message
73 auth_header_values = reshdr[resp_header].split(", ")
74 auth_header_value = None
75 for s in auth_header_values:
76 if s[:5] == "NTLM ":
77 auth_header_value = s[5:]
78 if auth_header_value is None:
79 raise Exception(
80 "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
81 )
82
83 # Send authentication message
84 ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
85 auth_header_value
86 )
87 auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
88 ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
89 )
90 headers[req_header] = "NTLM %s" % auth_msg
91 log.debug("Request headers: %s", headers)
92 conn.request("GET", self.authurl, None, headers)
93 res = conn.getresponse()
94 log.debug("Response status: %s %s", res.status, res.reason)
95 log.debug("Response headers: %s", dict(res.getheaders()))
96 log.debug("Response data: %s [...]", res.read()[:100])
97 if res.status != 200:
98 if res.status == 401:
99 raise Exception("Server rejected request: wrong username or password")
100 raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
101
102 res.fp = None
103 log.debug("Connection established")
104 return conn
105
106 def urlopen(
107 self,
108 method,
109 url,
110 body=None,
111 headers=None,
112 retries=3,
113 redirect=True,
114 assert_same_host=True,
115 ):
116 if headers is None:
117 headers = {}
118 headers["Connection"] = "Keep-Alive"
119 return super(NTLMConnectionPool, self).urlopen(
120 method, url, body, headers, retries, redirect, assert_same_host
121 )
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/urllib3/contrib/ntlmpool.py b/src/urllib3/contrib/ntlmpool.py
--- a/src/urllib3/contrib/ntlmpool.py
+++ b/src/urllib3/contrib/ntlmpool.py
@@ -5,6 +5,7 @@
"""
from __future__ import absolute_import
+import warnings
from logging import getLogger
from ntlm import ntlm
@@ -12,6 +13,14 @@
from .. import HTTPSConnectionPool
from ..packages.six.moves.http_client import HTTPSConnection
+warnings.warn(
+ "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed "
+ "in urllib3 v2.0 release, urllib3 is not able to support it properly due "
+ "to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. "
+ "If you are a user of this module please comment in the mentioned issue.",
+ DeprecationWarning,
+)
+
log = getLogger(__name__)
| {"golden_diff": "diff --git a/src/urllib3/contrib/ntlmpool.py b/src/urllib3/contrib/ntlmpool.py\n--- a/src/urllib3/contrib/ntlmpool.py\n+++ b/src/urllib3/contrib/ntlmpool.py\n@@ -5,6 +5,7 @@\n \"\"\"\n from __future__ import absolute_import\n \n+import warnings\n from logging import getLogger\n \n from ntlm import ntlm\n@@ -12,6 +13,14 @@\n from .. import HTTPSConnectionPool\n from ..packages.six.moves.http_client import HTTPSConnection\n \n+warnings.warn(\n+ \"The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed \"\n+ \"in urllib3 v2.0 release, urllib3 is not able to support it properly due \"\n+ \"to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. \"\n+ \"If you are a user of this module please comment in the mentioned issue.\",\n+ DeprecationWarning,\n+)\n+\n log = getLogger(__name__)\n", "issue": "Deprecate NTLMConnectionPool in 1.26.x\nAs was mentioned in https://github.com/urllib3/urllib3/pull/2278#issuecomment-864414599 and https://github.com/urllib3/urllib3/pull/2278#issuecomment-864450016 we're moving to remove `NTLMConnectionPool` and the `urllib3.contrib.nltmpool` module from urllib3 in v2.0 if we don't find a new maintainer for the module (perhaps as a third-party package ie `urllib3-ntlmpool`?)\r\n\r\n- The module is not covered by our test suite.\r\n- It is not clear even which pypi package is needed for it.\r\n- It has fallen into disrepair (e.g. timeout/ssl/other options not being respected).\r\n- According to Wikipedia, \"Since 2010, Microsoft no longer recommends NTLM in applications\"\r\n- Seems like it's not used often, if at all.\r\n\r\nIn the `1.26.x` branch we should unconditionally raise a `DeprecationWarning` when the module is imported. Should link to this issue with a call to action to comment in the issue if they are a user. This should help us better discover who (if any) our users are here so we can better make a decision.\n", "before_files": [{"content": "\"\"\"\nNTLM authenticating pool, contributed by erikcederstran\n\nIssue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10\n\"\"\"\nfrom __future__ import absolute_import\n\nfrom logging import getLogger\n\nfrom ntlm import ntlm\n\nfrom .. import HTTPSConnectionPool\nfrom ..packages.six.moves.http_client import HTTPSConnection\n\nlog = getLogger(__name__)\n\n\nclass NTLMConnectionPool(HTTPSConnectionPool):\n \"\"\"\n Implements an NTLM authentication version of an urllib3 connection pool\n \"\"\"\n\n scheme = \"https\"\n\n def __init__(self, user, pw, authurl, *args, **kwargs):\n \"\"\"\n authurl is a random URL on the server that is protected by NTLM.\n user is the Windows user, probably in the DOMAIN\\\\username format.\n pw is the password for the user.\n \"\"\"\n super(NTLMConnectionPool, self).__init__(*args, **kwargs)\n self.authurl = authurl\n self.rawuser = user\n user_parts = user.split(\"\\\\\", 1)\n self.domain = user_parts[0].upper()\n self.user = user_parts[1]\n self.pw = pw\n\n def _new_conn(self):\n # Performs the NTLM handshake that secures the connection. The socket\n # must be kept open while requests are performed.\n self.num_connections += 1\n log.debug(\n \"Starting NTLM HTTPS connection no. 
%d: https://%s%s\",\n self.num_connections,\n self.host,\n self.authurl,\n )\n\n headers = {\"Connection\": \"Keep-Alive\"}\n req_header = \"Authorization\"\n resp_header = \"www-authenticate\"\n\n conn = HTTPSConnection(host=self.host, port=self.port)\n\n # Send negotiation message\n headers[req_header] = \"NTLM %s\" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(\n self.rawuser\n )\n log.debug(\"Request headers: %s\", headers)\n conn.request(\"GET\", self.authurl, None, headers)\n res = conn.getresponse()\n reshdr = dict(res.getheaders())\n log.debug(\"Response status: %s %s\", res.status, res.reason)\n log.debug(\"Response headers: %s\", reshdr)\n log.debug(\"Response data: %s [...]\", res.read(100))\n\n # Remove the reference to the socket, so that it can not be closed by\n # the response object (we want to keep the socket open)\n res.fp = None\n\n # Server should respond with a challenge message\n auth_header_values = reshdr[resp_header].split(\", \")\n auth_header_value = None\n for s in auth_header_values:\n if s[:5] == \"NTLM \":\n auth_header_value = s[5:]\n if auth_header_value is None:\n raise Exception(\n \"Unexpected %s response header: %s\" % (resp_header, reshdr[resp_header])\n )\n\n # Send authentication message\n ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(\n auth_header_value\n )\n auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(\n ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags\n )\n headers[req_header] = \"NTLM %s\" % auth_msg\n log.debug(\"Request headers: %s\", headers)\n conn.request(\"GET\", self.authurl, None, headers)\n res = conn.getresponse()\n log.debug(\"Response status: %s %s\", res.status, res.reason)\n log.debug(\"Response headers: %s\", dict(res.getheaders()))\n log.debug(\"Response data: %s [...]\", res.read()[:100])\n if res.status != 200:\n if res.status == 401:\n raise Exception(\"Server rejected request: wrong username or password\")\n raise Exception(\"Wrong server response: %s %s\" % (res.status, res.reason))\n\n res.fp = None\n log.debug(\"Connection established\")\n return conn\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=3,\n redirect=True,\n assert_same_host=True,\n ):\n if headers is None:\n headers = {}\n headers[\"Connection\"] = \"Keep-Alive\"\n return super(NTLMConnectionPool, self).urlopen(\n method, url, body, headers, retries, redirect, assert_same_host\n )\n", "path": "src/urllib3/contrib/ntlmpool.py"}], "after_files": [{"content": "\"\"\"\nNTLM authenticating pool, contributed by erikcederstran\n\nIssue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10\n\"\"\"\nfrom __future__ import absolute_import\n\nimport warnings\nfrom logging import getLogger\n\nfrom ntlm import ntlm\n\nfrom .. import HTTPSConnectionPool\nfrom ..packages.six.moves.http_client import HTTPSConnection\n\nwarnings.warn(\n \"The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed \"\n \"in urllib3 v2.0 release, urllib3 is not able to support it properly due \"\n \"to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. 
\"\n \"If you are a user of this module please comment in the mentioned issue.\",\n DeprecationWarning,\n)\n\nlog = getLogger(__name__)\n\n\nclass NTLMConnectionPool(HTTPSConnectionPool):\n \"\"\"\n Implements an NTLM authentication version of an urllib3 connection pool\n \"\"\"\n\n scheme = \"https\"\n\n def __init__(self, user, pw, authurl, *args, **kwargs):\n \"\"\"\n authurl is a random URL on the server that is protected by NTLM.\n user is the Windows user, probably in the DOMAIN\\\\username format.\n pw is the password for the user.\n \"\"\"\n super(NTLMConnectionPool, self).__init__(*args, **kwargs)\n self.authurl = authurl\n self.rawuser = user\n user_parts = user.split(\"\\\\\", 1)\n self.domain = user_parts[0].upper()\n self.user = user_parts[1]\n self.pw = pw\n\n def _new_conn(self):\n # Performs the NTLM handshake that secures the connection. The socket\n # must be kept open while requests are performed.\n self.num_connections += 1\n log.debug(\n \"Starting NTLM HTTPS connection no. %d: https://%s%s\",\n self.num_connections,\n self.host,\n self.authurl,\n )\n\n headers = {\"Connection\": \"Keep-Alive\"}\n req_header = \"Authorization\"\n resp_header = \"www-authenticate\"\n\n conn = HTTPSConnection(host=self.host, port=self.port)\n\n # Send negotiation message\n headers[req_header] = \"NTLM %s\" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(\n self.rawuser\n )\n log.debug(\"Request headers: %s\", headers)\n conn.request(\"GET\", self.authurl, None, headers)\n res = conn.getresponse()\n reshdr = dict(res.getheaders())\n log.debug(\"Response status: %s %s\", res.status, res.reason)\n log.debug(\"Response headers: %s\", reshdr)\n log.debug(\"Response data: %s [...]\", res.read(100))\n\n # Remove the reference to the socket, so that it can not be closed by\n # the response object (we want to keep the socket open)\n res.fp = None\n\n # Server should respond with a challenge message\n auth_header_values = reshdr[resp_header].split(\", \")\n auth_header_value = None\n for s in auth_header_values:\n if s[:5] == \"NTLM \":\n auth_header_value = s[5:]\n if auth_header_value is None:\n raise Exception(\n \"Unexpected %s response header: %s\" % (resp_header, reshdr[resp_header])\n )\n\n # Send authentication message\n ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(\n auth_header_value\n )\n auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(\n ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags\n )\n headers[req_header] = \"NTLM %s\" % auth_msg\n log.debug(\"Request headers: %s\", headers)\n conn.request(\"GET\", self.authurl, None, headers)\n res = conn.getresponse()\n log.debug(\"Response status: %s %s\", res.status, res.reason)\n log.debug(\"Response headers: %s\", dict(res.getheaders()))\n log.debug(\"Response data: %s [...]\", res.read()[:100])\n if res.status != 200:\n if res.status == 401:\n raise Exception(\"Server rejected request: wrong username or password\")\n raise Exception(\"Wrong server response: %s %s\" % (res.status, res.reason))\n\n res.fp = None\n log.debug(\"Connection established\")\n return conn\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=3,\n redirect=True,\n assert_same_host=True,\n ):\n if headers is None:\n headers = {}\n headers[\"Connection\"] = \"Keep-Alive\"\n return super(NTLMConnectionPool, self).urlopen(\n method, url, body, headers, retries, redirect, assert_same_host\n )\n", "path": "src/urllib3/contrib/ntlmpool.py"}]} | 1,816 | 241 |
gh_patches_debug_10228 | rasdani/github-patches | git_diff | fedora-infra__bodhi-1520 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py uses server default
The ```alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py``` migration uses a server default, which is not allowed by BDR:
```
[bowlofeggs@bodhi-backend01 ~][STG]$ sudo /usr/bin/alembic -c /etc/bodhi/alembic.ini upgrade head
INFO [alembic.runtime.migration] Context impl PostgresqlImpl.
INFO [alembic.runtime.migration] Will assume transactional DDL.
INFO [alembic.runtime.migration] Running upgrade 12d3e8695f90 -> 9241378c92ab, Convert the builds table to be polymorphic.
Traceback (most recent call last):
File "/usr/bin/alembic", line 12, in <module>
sys.exit(load_entry_point('alembic', 'console_scripts', 'alembic')())
File "/usr/lib/python2.7/site-packages/alembic/config.py", line 479, in main
CommandLine(prog=prog).main(argv=argv)
File "/usr/lib/python2.7/site-packages/alembic/config.py", line 473, in main
self.run_cmd(cfg, options)
File "/usr/lib/python2.7/site-packages/alembic/config.py", line 456, in run_cmd
**dict((k, getattr(options, k)) for k in kwarg)
File "/usr/lib/python2.7/site-packages/alembic/command.py", line 174, in upgrade
script.run_env()
File "/usr/lib/python2.7/site-packages/alembic/script/base.py", line 397, in run_env
util.load_python_file(self.dir, 'env.py')
File "/usr/lib/python2.7/site-packages/alembic/util/pyfiles.py", line 93, in load_python_file
module = load_module_py(module_id, path)
File "/usr/lib/python2.7/site-packages/alembic/util/compat.py", line 79, in load_module_py
mod = imp.load_source(module_id, path, fp)
File "/usr/share/bodhi/alembic/env.py", line 83, in <module>
run_migrations_online()
File "/usr/share/bodhi/alembic/env.py", line 76, in run_migrations_online
context.run_migrations()
File "<string>", line 8, in run_migrations
File "/usr/lib/python2.7/site-packages/alembic/runtime/environment.py", line 797, in run_migrations
self.get_context().run_migrations(**kw)
File "/usr/lib/python2.7/site-packages/alembic/runtime/migration.py", line 312, in run_migrations
step.migration_fn(**kw)
File "/usr/share/bodhi/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py", line 19, in upgrade
op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))
File "<string>", line 8, in add_column
File "<string>", line 3, in add_column
File "/usr/lib/python2.7/site-packages/alembic/operations/ops.py", line 1535, in add_column
return operations.invoke(op)
File "/usr/lib/python2.7/site-packages/alembic/operations/base.py", line 318, in invoke
return fn(self, operation)
File "/usr/lib/python2.7/site-packages/alembic/operations/toimpl.py", line 123, in add_column
schema=schema
File "/usr/lib/python2.7/site-packages/alembic/ddl/impl.py", line 172, in add_column
self._exec(base.AddColumn(table_name, column, schema=schema))
File "/usr/lib/python2.7/site-packages/alembic/ddl/impl.py", line 118, in _exec
return conn.execute(construct, *multiparams, **params)
File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 914, in execute
return meth(self, multiparams, params)
File "/usr/lib64/python2.7/site-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 968, in _execute_ddl
compiled
File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1146, in _execute_context
context)
File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1341, in _handle_dbapi_exception
exc_info
File "/usr/lib64/python2.7/site-packages/sqlalchemy/util/compat.py", line 203, in raise_from_cause
reraise(type(exception), exception, tb=exc_tb, cause=cause)
File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py", line 1139, in _execute_context
context)
File "/usr/lib64/python2.7/site-packages/sqlalchemy/engine/default.py", line 450, in do_execute
cursor.execute(statement, parameters)
sqlalchemy.exc.NotSupportedError: (psycopg2.NotSupportedError) ALTER TABLE ... ADD COLUMN ... DEFAULT may only affect UNLOGGED or TEMPORARY tables when BDR is active; builds is a regular table
[SQL: "ALTER TABLE builds ADD COLUMN type INTEGER DEFAULT '1' NOT NULL"]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py`
Content:
```
1 """Convert the builds table to be polymorphic.
2
3 Revision ID: 9241378c92ab
4 Revises: 12d3e8695f90
5 Create Date: 2017-04-06 20:37:24.766366
6 """
7 from alembic import op
8 import sqlalchemy as sa
9
10
11 # revision identifiers, used by Alembic.
12 revision = '9241378c92ab'
13 down_revision = '12d3e8695f90'
14
15
16 def upgrade():
17 """Add the type column to the builds table."""
18 # The default of ``1`` is the RPM Build type.
19 op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))
20 op.alter_column('builds', 'type', server_default=None)
21
22
23 def downgrade():
24 """Remove the type column from the builds table."""
25 op.drop_column('builds', 'type')
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
--- a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
+++ b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py
@@ -15,9 +15,11 @@
def upgrade():
"""Add the type column to the builds table."""
- # The default of ``1`` is the RPM Build type.
- op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))
- op.alter_column('builds', 'type', server_default=None)
+ builds = sa.sql.table('builds', sa.sql.column('type', sa.Integer()))
+ op.add_column('builds', sa.Column('type', sa.Integer(), nullable=True))
+ # The type 1 is the RPM Build type.
+ op.execute(builds.update().values({'type': 1}))
+ op.alter_column('builds', 'type', nullable=False)
def downgrade():
| {"golden_diff": "diff --git a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n--- a/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n+++ b/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\n@@ -15,9 +15,11 @@\n \n def upgrade():\n \"\"\"Add the type column to the builds table.\"\"\"\n- # The default of ``1`` is the RPM Build type.\n- op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))\n- op.alter_column('builds', 'type', server_default=None)\n+ builds = sa.sql.table('builds', sa.sql.column('type', sa.Integer()))\n+ op.add_column('builds', sa.Column('type', sa.Integer(), nullable=True))\n+ # The type 1 is the RPM Build type.\n+ op.execute(builds.update().values({'type': 1}))\n+ op.alter_column('builds', 'type', nullable=False)\n \n \n def downgrade():\n", "issue": "alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py uses server default\nThe ```alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py``` migration uses a server default, which is not allowed by BDR:\r\n\r\n```\r\n[bowlofeggs@bodhi-backend01 ~][STG]$ sudo /usr/bin/alembic -c /etc/bodhi/alembic.ini upgrade head\r\nINFO [alembic.runtime.migration] Context impl PostgresqlImpl.\r\nINFO [alembic.runtime.migration] Will assume transactional DDL.\r\nINFO [alembic.runtime.migration] Running upgrade 12d3e8695f90 -> 9241378c92ab, Convert the builds table to be polymorphic.\r\nTraceback (most recent call last):\r\n File \"/usr/bin/alembic\", line 12, in <module>\r\n sys.exit(load_entry_point('alembic', 'console_scripts', 'alembic')())\r\n File \"/usr/lib/python2.7/site-packages/alembic/config.py\", line 479, in main\r\n CommandLine(prog=prog).main(argv=argv)\r\n File \"/usr/lib/python2.7/site-packages/alembic/config.py\", line 473, in main\r\n self.run_cmd(cfg, options)\r\n File \"/usr/lib/python2.7/site-packages/alembic/config.py\", line 456, in run_cmd\r\n **dict((k, getattr(options, k)) for k in kwarg)\r\n File \"/usr/lib/python2.7/site-packages/alembic/command.py\", line 174, in upgrade\r\n script.run_env()\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/base.py\", line 397, in run_env\r\n util.load_python_file(self.dir, 'env.py')\r\n File \"/usr/lib/python2.7/site-packages/alembic/util/pyfiles.py\", line 93, in load_python_file\r\n module = load_module_py(module_id, path)\r\n File \"/usr/lib/python2.7/site-packages/alembic/util/compat.py\", line 79, in load_module_py\r\n mod = imp.load_source(module_id, path, fp)\r\n File \"/usr/share/bodhi/alembic/env.py\", line 83, in <module>\r\n run_migrations_online()\r\n File \"/usr/share/bodhi/alembic/env.py\", line 76, in run_migrations_online\r\n context.run_migrations()\r\n File \"<string>\", line 8, in run_migrations\r\n File \"/usr/lib/python2.7/site-packages/alembic/runtime/environment.py\", line 797, in run_migrations\r\n self.get_context().run_migrations(**kw)\r\n File \"/usr/lib/python2.7/site-packages/alembic/runtime/migration.py\", line 312, in run_migrations\r\n step.migration_fn(**kw)\r\n File \"/usr/share/bodhi/alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py\", line 19, in upgrade\r\n op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))\r\n File \"<string>\", line 8, in add_column\r\n File \"<string>\", line 3, in add_column\r\n File \"/usr/lib/python2.7/site-packages/alembic/operations/ops.py\", line 1535, in add_column\r\n return 
operations.invoke(op)\r\n File \"/usr/lib/python2.7/site-packages/alembic/operations/base.py\", line 318, in invoke\r\n return fn(self, operation)\r\n File \"/usr/lib/python2.7/site-packages/alembic/operations/toimpl.py\", line 123, in add_column\r\n schema=schema\r\n File \"/usr/lib/python2.7/site-packages/alembic/ddl/impl.py\", line 172, in add_column\r\n self._exec(base.AddColumn(table_name, column, schema=schema))\r\n File \"/usr/lib/python2.7/site-packages/alembic/ddl/impl.py\", line 118, in _exec\r\n return conn.execute(construct, *multiparams, **params)\r\n File \"/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py\", line 914, in execute\r\n return meth(self, multiparams, params)\r\n File \"/usr/lib64/python2.7/site-packages/sqlalchemy/sql/ddl.py\", line 68, in _execute_on_connection\r\n return connection._execute_ddl(self, multiparams, params)\r\n File \"/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py\", line 968, in _execute_ddl\r\n compiled\r\n File \"/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py\", line 1146, in _execute_context\r\n context)\r\n File \"/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py\", line 1341, in _handle_dbapi_exception\r\n exc_info\r\n File \"/usr/lib64/python2.7/site-packages/sqlalchemy/util/compat.py\", line 203, in raise_from_cause\r\n reraise(type(exception), exception, tb=exc_tb, cause=cause)\r\n File \"/usr/lib64/python2.7/site-packages/sqlalchemy/engine/base.py\", line 1139, in _execute_context\r\n context)\r\n File \"/usr/lib64/python2.7/site-packages/sqlalchemy/engine/default.py\", line 450, in do_execute\r\n cursor.execute(statement, parameters)\r\nsqlalchemy.exc.NotSupportedError: (psycopg2.NotSupportedError) ALTER TABLE ... ADD COLUMN ... DEFAULT may only affect UNLOGGED or TEMPORARY tables when BDR is active; builds is a regular table\r\n [SQL: \"ALTER TABLE builds ADD COLUMN type INTEGER DEFAULT '1' NOT NULL\"]\r\n```\n", "before_files": [{"content": "\"\"\"Convert the builds table to be polymorphic.\n\nRevision ID: 9241378c92ab\nRevises: 12d3e8695f90\nCreate Date: 2017-04-06 20:37:24.766366\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9241378c92ab'\ndown_revision = '12d3e8695f90'\n\n\ndef upgrade():\n \"\"\"Add the type column to the builds table.\"\"\"\n # The default of ``1`` is the RPM Build type.\n op.add_column('builds', sa.Column('type', sa.Integer(), nullable=False, server_default=u'1'))\n op.alter_column('builds', 'type', server_default=None)\n\n\ndef downgrade():\n \"\"\"Remove the type column from the builds table.\"\"\"\n op.drop_column('builds', 'type')\n", "path": "alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py"}], "after_files": [{"content": "\"\"\"Convert the builds table to be polymorphic.\n\nRevision ID: 9241378c92ab\nRevises: 12d3e8695f90\nCreate Date: 2017-04-06 20:37:24.766366\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '9241378c92ab'\ndown_revision = '12d3e8695f90'\n\n\ndef upgrade():\n \"\"\"Add the type column to the builds table.\"\"\"\n builds = sa.sql.table('builds', sa.sql.column('type', sa.Integer()))\n op.add_column('builds', sa.Column('type', sa.Integer(), nullable=True))\n # The type 1 is the RPM Build type.\n op.execute(builds.update().values({'type': 1}))\n op.alter_column('builds', 'type', nullable=False)\n\n\ndef downgrade():\n \"\"\"Remove the type column from the builds table.\"\"\"\n 
op.drop_column('builds', 'type')\n", "path": "alembic/versions/9241378c92ab_convert_the_builds_table_to_be_.py"}]} | 1,902 | 294 |
gh_patches_debug_5168 | rasdani/github-patches | git_diff | ivy-llc__ivy-13695 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
poisson
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/jax/random.py`
Content:
```
1 # local
2 import ivy
3 from ivy.func_wrapper import with_unsupported_dtypes
4 from ivy.functional.frontends.jax.func_wrapper import (
5 to_ivy_arrays_and_back,
6 handle_jax_dtype,
7 )
8
9
10 @to_ivy_arrays_and_back
11 def PRNGKey(seed):
12 return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)
13
14
15 @handle_jax_dtype
16 @to_ivy_arrays_and_back
17 def uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):
18 return ivy.random_uniform(
19 low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])
20 )
21
22
23 @handle_jax_dtype
24 @to_ivy_arrays_and_back
25 def normal(key, shape=(), dtype=None):
26 return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))
27
28
29 def _get_seed(key):
30 key1, key2 = int(key[0]), int(key[1])
31 return ivy.to_scalar(int("".join(map(str, [key1, key2]))))
32
33
34 @handle_jax_dtype
35 @to_ivy_arrays_and_back
36 @with_unsupported_dtypes(
37 {
38 "0.3.14 and below": (
39 "float16",
40 "bfloat16",
41 )
42 },
43 "jax",
44 )
45 def beta(key, a, b, shape=None, dtype=None):
46 seed = _get_seed(key)
47 return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)
48
49
50 @handle_jax_dtype
51 @to_ivy_arrays_and_back
52 @with_unsupported_dtypes(
53 {
54 "0.3.14 and below": (
55 "float16",
56 "bfloat16",
57 )
58 },
59 "jax",
60 )
61 def dirichlet(key, alpha, shape=None, dtype="float32"):
62 seed = _get_seed(key)
63 alpha = ivy.astype(alpha, dtype)
64 return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py
--- a/ivy/functional/frontends/jax/random.py
+++ b/ivy/functional/frontends/jax/random.py
@@ -62,3 +62,14 @@
seed = _get_seed(key)
alpha = ivy.astype(alpha, dtype)
return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)
+
+
+@handle_jax_dtype
+@to_ivy_arrays_and_back
+@with_unsupported_dtypes(
+ {"0.3.14 and below": ("unsigned", "int8", "int16")},
+ "jax",
+)
+def poisson(key, lam, shape=None, dtype=None):
+ seed = _get_seed(key)
+ return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)
| {"golden_diff": "diff --git a/ivy/functional/frontends/jax/random.py b/ivy/functional/frontends/jax/random.py\n--- a/ivy/functional/frontends/jax/random.py\n+++ b/ivy/functional/frontends/jax/random.py\n@@ -62,3 +62,14 @@\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n+\n+\n+@handle_jax_dtype\n+@to_ivy_arrays_and_back\n+@with_unsupported_dtypes(\n+ {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n+ \"jax\",\n+)\n+def poisson(key, lam, shape=None, dtype=None):\n+ seed = _get_seed(key)\n+ return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)\n", "issue": "poisson\n\n", "before_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n\n\ndef _get_seed(key):\n key1, key2 = int(key[0]), int(key[1])\n return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef beta(key, a, b, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef dirichlet(key, alpha, shape=None, dtype=\"float32\"):\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n", "path": "ivy/functional/frontends/jax/random.py"}], "after_files": [{"content": "# local\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.jax.func_wrapper import (\n to_ivy_arrays_and_back,\n handle_jax_dtype,\n)\n\n\n@to_ivy_arrays_and_back\ndef PRNGKey(seed):\n return ivy.array([0, seed % 4294967295 - (seed // 4294967295)], dtype=ivy.int64)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef uniform(key, shape=(), dtype=None, minval=0.0, maxval=1.0):\n return ivy.random_uniform(\n low=minval, high=maxval, shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1])\n )\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\ndef normal(key, shape=(), dtype=None):\n return ivy.random_normal(shape=shape, dtype=dtype, seed=ivy.to_scalar(key[1]))\n\n\ndef _get_seed(key):\n key1, key2 = int(key[0]), int(key[1])\n return ivy.to_scalar(int(\"\".join(map(str, [key1, key2]))))\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef beta(key, a, b, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.beta(a, b, shape=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\n \"0.3.14 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"jax\",\n)\ndef 
dirichlet(key, alpha, shape=None, dtype=\"float32\"):\n seed = _get_seed(key)\n alpha = ivy.astype(alpha, dtype)\n return ivy.dirichlet(alpha, size=shape, dtype=dtype, seed=seed)\n\n\n@handle_jax_dtype\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes(\n {\"0.3.14 and below\": (\"unsigned\", \"int8\", \"int16\")},\n \"jax\",\n)\ndef poisson(key, lam, shape=None, dtype=None):\n seed = _get_seed(key)\n return ivy.poisson(lam, shape=shape, dtype=dtype, seed=seed)\n", "path": "ivy/functional/frontends/jax/random.py"}]} | 880 | 207 |
gh_patches_debug_41621 | rasdani/github-patches | git_diff | watchdogpolska__feder-328 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CSV export of EmailLog
We have introduced message-delivery statistics in ```feder.letters.logs```. We should add an export of all EmailLog data for a given monitoring so that statistics and similar analyses can be produced.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `feder/letters/logs/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from braces.views import SelectRelatedMixin, PrefetchRelatedMixin
5 from cached_property import cached_property
6 from django.shortcuts import get_object_or_404
7 from django.views.generic import DetailView, ListView
8
9 from feder.cases.models import Case
10 from feder.letters.logs.models import EmailLog
11 from feder.main.mixins import AttrPermissionRequiredMixin
12 from feder.monitorings.models import Monitoring
13
14
15 class ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin):
16 select_related = ['case']
17 paginate_by = 100
18 model = EmailLog
19 permission_attribute = 'case__monitoring'
20 permission_required = 'monitorings.view_log'
21
22 def get_permission_object(self):
23 return self.monitoring
24
25 def get_queryset(self):
26 return super(ListMonitoringMixin, self).get_queryset().filter(case__monitoring=self.monitoring).with_logrecord_count()
27
28 def get_context_data(self, **kwargs):
29 kwargs['monitoring'] = self.monitoring
30 return super(ListMonitoringMixin, self).get_context_data(**kwargs)
31
32
33 class EmailLogMonitoringListView(ListMonitoringMixin, ListView):
34 template_name_suffix = '_list_for_monitoring'
35 permission_required = 'monitorings.view_log'
36
37 @cached_property
38 def monitoring(self):
39 return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])
40
41
42 class EmailLogCaseListView(ListMonitoringMixin, ListView):
43 template_name_suffix = '_list_for_case'
44
45 @cached_property
46 def case(self):
47 return get_object_or_404(Case.objects.select_related('monitoring'),
48 pk=self.kwargs['case_pk'])
49
50 @cached_property
51 def monitoring(self):
52 return self.case.monitoring
53
54 def get_context_data(self, **kwargs):
55 kwargs['case'] = self.case
56 return super(EmailLogCaseListView, self).get_context_data(**kwargs)
57
58 def get_queryset(self):
59 return super(ListMonitoringMixin, self).get_queryset().filter(case=self.case)
60
61
62 class EmailLogDetailView(AttrPermissionRequiredMixin, PrefetchRelatedMixin,
63 SelectRelatedMixin, DetailView):
64 model = EmailLog
65 select_related = ['case__monitoring']
66 prefetch_related = ['logrecord_set']
67 permission_attribute = 'case__monitoring'
68 permission_required = 'monitorings.view_log'
69
```
Path: `feder/letters/logs/urls.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.conf.urls import url
5 from django.utils.translation import ugettext_lazy as _
6
7 from . import views
8
9 urlpatterns = [
10 url(_(r'^case-(?P<case_pk>[\d-]+)$'), views.EmailLogCaseListView.as_view(),
11 name="list"),
12 url(_(r'^monitoring-(?P<monitoring_pk>[\d-]+)$'), views.EmailLogMonitoringListView.as_view(),
13 name="list"),
14 url(_(r'^log-(?P<pk>[\d-]+)$'), views.EmailLogDetailView.as_view(),
15 name="detail"),
16 ]
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/feder/letters/logs/urls.py b/feder/letters/logs/urls.py
--- a/feder/letters/logs/urls.py
+++ b/feder/letters/logs/urls.py
@@ -11,6 +11,8 @@
name="list"),
url(_(r'^monitoring-(?P<monitoring_pk>[\d-]+)$'), views.EmailLogMonitoringListView.as_view(),
name="list"),
+ url(_(r'^monitoring-(?P<monitoring_pk>[\d-]+)/export$'), views.EmailLogMonitoringCsvView.as_view(),
+ name="export"),
url(_(r'^log-(?P<pk>[\d-]+)$'), views.EmailLogDetailView.as_view(),
name="detail"),
]
diff --git a/feder/letters/logs/views.py b/feder/letters/logs/views.py
--- a/feder/letters/logs/views.py
+++ b/feder/letters/logs/views.py
@@ -1,8 +1,12 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
+from django.utils import timezone
+import unicodecsv as csv
+
from braces.views import SelectRelatedMixin, PrefetchRelatedMixin
from cached_property import cached_property
+from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic import DetailView, ListView
@@ -10,7 +14,7 @@
from feder.letters.logs.models import EmailLog
from feder.main.mixins import AttrPermissionRequiredMixin
from feder.monitorings.models import Monitoring
-
+from django.views.generic.list import ListView
class ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin):
select_related = ['case']
@@ -39,6 +43,61 @@
return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])
+class EmailLogMonitoringCsvView(ListMonitoringMixin, ListView):
+ permission_required = 'monitorings.view_log'
+
+ select_related = ['case', 'case__institution']
+
+ @cached_property
+ def monitoring(self):
+ return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])
+
+ def get(self, *args, **kwargs):
+ response = self._get_csv_response()
+ self._write_rows(response, self.get_queryset())
+ return response
+
+ @staticmethod
+ def _get_base_model_field_names(queryset):
+ opts = queryset.model._meta
+ return [field.name for field in opts.fields if field.related_model is None]
+
+ def _get_csv_response(self):
+ csv_response = HttpResponse(content_type='text/csv')
+ current_time = timezone.now()
+ filename = 'email_log_{0}-{1}-{2}.csv'.format(self.monitoring.id,
+ current_time.strftime('%Y_%m_%d-%H_%M_%S'),
+ current_time.tzname()
+ )
+ csv_response['Content-Disposition'] = "attachment;filename={0}".format(filename)
+ return csv_response
+
+ def _write_rows(self, response, queryset):
+ writer = csv.writer(response)
+
+ # automatically add all fields from base table/model
+ base_field_names = self._get_base_model_field_names(queryset)
+
+ # print header row
+ writer.writerow(base_field_names +
+ [
+ 'case id',
+ 'case email',
+ 'institution',
+ 'institution id',
+ 'monitoring id']
+ )
+
+ for obj in queryset:
+ writer.writerow(
+ [getattr(obj, field) for field in base_field_names] + [
+ obj.case.id,
+ obj.case.email,
+ obj.case.institution.name,
+ obj.case.institution_id,
+ obj.case.monitoring_id,
+ ])
+
class EmailLogCaseListView(ListMonitoringMixin, ListView):
template_name_suffix = '_list_for_case'
| {"golden_diff": "diff --git a/feder/letters/logs/urls.py b/feder/letters/logs/urls.py\n--- a/feder/letters/logs/urls.py\n+++ b/feder/letters/logs/urls.py\n@@ -11,6 +11,8 @@\n name=\"list\"),\n url(_(r'^monitoring-(?P<monitoring_pk>[\\d-]+)$'), views.EmailLogMonitoringListView.as_view(),\n name=\"list\"),\n+ url(_(r'^monitoring-(?P<monitoring_pk>[\\d-]+)/export$'), views.EmailLogMonitoringCsvView.as_view(),\n+ name=\"export\"),\n url(_(r'^log-(?P<pk>[\\d-]+)$'), views.EmailLogDetailView.as_view(),\n name=\"detail\"),\n ]\ndiff --git a/feder/letters/logs/views.py b/feder/letters/logs/views.py\n--- a/feder/letters/logs/views.py\n+++ b/feder/letters/logs/views.py\n@@ -1,8 +1,12 @@\n # -*- coding: utf-8 -*-\n from __future__ import unicode_literals\n \n+from django.utils import timezone\n+import unicodecsv as csv\n+\n from braces.views import SelectRelatedMixin, PrefetchRelatedMixin\n from cached_property import cached_property\n+from django.http import HttpResponse\n from django.shortcuts import get_object_or_404\n from django.views.generic import DetailView, ListView\n \n@@ -10,7 +14,7 @@\n from feder.letters.logs.models import EmailLog\n from feder.main.mixins import AttrPermissionRequiredMixin\n from feder.monitorings.models import Monitoring\n-\n+from django.views.generic.list import ListView\n \n class ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin):\n select_related = ['case']\n@@ -39,6 +43,61 @@\n return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])\n \n \n+class EmailLogMonitoringCsvView(ListMonitoringMixin, ListView):\n+ permission_required = 'monitorings.view_log'\n+\n+ select_related = ['case', 'case__institution']\n+\n+ @cached_property\n+ def monitoring(self):\n+ return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])\n+\n+ def get(self, *args, **kwargs):\n+ response = self._get_csv_response()\n+ self._write_rows(response, self.get_queryset())\n+ return response\n+\n+ @staticmethod\n+ def _get_base_model_field_names(queryset):\n+ opts = queryset.model._meta\n+ return [field.name for field in opts.fields if field.related_model is None]\n+\n+ def _get_csv_response(self):\n+ csv_response = HttpResponse(content_type='text/csv')\n+ current_time = timezone.now()\n+ filename = 'email_log_{0}-{1}-{2}.csv'.format(self.monitoring.id,\n+ current_time.strftime('%Y_%m_%d-%H_%M_%S'),\n+ current_time.tzname()\n+ )\n+ csv_response['Content-Disposition'] = \"attachment;filename={0}\".format(filename)\n+ return csv_response\n+\n+ def _write_rows(self, response, queryset):\n+ writer = csv.writer(response)\n+\n+ # automatically add all fields from base table/model\n+ base_field_names = self._get_base_model_field_names(queryset)\n+\n+ # print header row\n+ writer.writerow(base_field_names +\n+ [\n+ 'case id',\n+ 'case email',\n+ 'institution',\n+ 'institution id',\n+ 'monitoring id']\n+ )\n+\n+ for obj in queryset:\n+ writer.writerow(\n+ [getattr(obj, field) for field in base_field_names] + [\n+ obj.case.id,\n+ obj.case.email,\n+ obj.case.institution.name,\n+ obj.case.institution_id,\n+ obj.case.monitoring_id,\n+ ])\n+\n class EmailLogCaseListView(ListMonitoringMixin, ListView):\n template_name_suffix = '_list_for_case'\n", "issue": "Eksport w CSV EmailLog \nWprowadzili\u015bmy w ```feder.letters.logs``` statystyki dostarczania wiadomo\u015bci. 
Nale\u017cy wprowadzi\u0107 zestawienie wszystkich danych z EmailLog dla danego monitoringu, aby mo\u017cna by\u0142o zrobi\u0107 statystyk\u0119 czy co\u015b.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom braces.views import SelectRelatedMixin, PrefetchRelatedMixin\nfrom cached_property import cached_property\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import DetailView, ListView\n\nfrom feder.cases.models import Case\nfrom feder.letters.logs.models import EmailLog\nfrom feder.main.mixins import AttrPermissionRequiredMixin\nfrom feder.monitorings.models import Monitoring\n\n\nclass ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin):\n select_related = ['case']\n paginate_by = 100\n model = EmailLog\n permission_attribute = 'case__monitoring'\n permission_required = 'monitorings.view_log'\n\n def get_permission_object(self):\n return self.monitoring\n\n def get_queryset(self):\n return super(ListMonitoringMixin, self).get_queryset().filter(case__monitoring=self.monitoring).with_logrecord_count()\n\n def get_context_data(self, **kwargs):\n kwargs['monitoring'] = self.monitoring\n return super(ListMonitoringMixin, self).get_context_data(**kwargs)\n\n\nclass EmailLogMonitoringListView(ListMonitoringMixin, ListView):\n template_name_suffix = '_list_for_monitoring'\n permission_required = 'monitorings.view_log'\n\n @cached_property\n def monitoring(self):\n return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])\n\n\nclass EmailLogCaseListView(ListMonitoringMixin, ListView):\n template_name_suffix = '_list_for_case'\n\n @cached_property\n def case(self):\n return get_object_or_404(Case.objects.select_related('monitoring'),\n pk=self.kwargs['case_pk'])\n\n @cached_property\n def monitoring(self):\n return self.case.monitoring\n\n def get_context_data(self, **kwargs):\n kwargs['case'] = self.case\n return super(EmailLogCaseListView, self).get_context_data(**kwargs)\n\n def get_queryset(self):\n return super(ListMonitoringMixin, self).get_queryset().filter(case=self.case)\n\n\nclass EmailLogDetailView(AttrPermissionRequiredMixin, PrefetchRelatedMixin,\n SelectRelatedMixin, DetailView):\n model = EmailLog\n select_related = ['case__monitoring']\n prefetch_related = ['logrecord_set']\n permission_attribute = 'case__monitoring'\n permission_required = 'monitorings.view_log'\n", "path": "feder/letters/logs/views.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import url\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . 
import views\n\nurlpatterns = [\n url(_(r'^case-(?P<case_pk>[\\d-]+)$'), views.EmailLogCaseListView.as_view(),\n name=\"list\"),\n url(_(r'^monitoring-(?P<monitoring_pk>[\\d-]+)$'), views.EmailLogMonitoringListView.as_view(),\n name=\"list\"),\n url(_(r'^log-(?P<pk>[\\d-]+)$'), views.EmailLogDetailView.as_view(),\n name=\"detail\"),\n]\n", "path": "feder/letters/logs/urls.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.utils import timezone\nimport unicodecsv as csv\n\nfrom braces.views import SelectRelatedMixin, PrefetchRelatedMixin\nfrom cached_property import cached_property\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import DetailView, ListView\n\nfrom feder.cases.models import Case\nfrom feder.letters.logs.models import EmailLog\nfrom feder.main.mixins import AttrPermissionRequiredMixin\nfrom feder.monitorings.models import Monitoring\nfrom django.views.generic.list import ListView\n\nclass ListMonitoringMixin(AttrPermissionRequiredMixin, SelectRelatedMixin):\n select_related = ['case']\n paginate_by = 100\n model = EmailLog\n permission_attribute = 'case__monitoring'\n permission_required = 'monitorings.view_log'\n\n def get_permission_object(self):\n return self.monitoring\n\n def get_queryset(self):\n return super(ListMonitoringMixin, self).get_queryset().filter(case__monitoring=self.monitoring).with_logrecord_count()\n\n def get_context_data(self, **kwargs):\n kwargs['monitoring'] = self.monitoring\n return super(ListMonitoringMixin, self).get_context_data(**kwargs)\n\n\nclass EmailLogMonitoringListView(ListMonitoringMixin, ListView):\n template_name_suffix = '_list_for_monitoring'\n permission_required = 'monitorings.view_log'\n\n @cached_property\n def monitoring(self):\n return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])\n\n\nclass EmailLogMonitoringCsvView(ListMonitoringMixin, ListView):\n permission_required = 'monitorings.view_log'\n\n select_related = ['case', 'case__institution']\n\n @cached_property\n def monitoring(self):\n return get_object_or_404(Monitoring, pk=self.kwargs['monitoring_pk'])\n\n def get(self, *args, **kwargs):\n response = self._get_csv_response()\n self._write_rows(response, self.get_queryset())\n return response\n\n @staticmethod\n def _get_base_model_field_names(queryset):\n opts = queryset.model._meta\n return [field.name for field in opts.fields if field.related_model is None]\n\n def _get_csv_response(self):\n csv_response = HttpResponse(content_type='text/csv')\n current_time = timezone.now()\n filename = 'email_log_{0}-{1}-{2}.csv'.format(self.monitoring.id,\n current_time.strftime('%Y_%m_%d-%H_%M_%S'),\n current_time.tzname()\n )\n csv_response['Content-Disposition'] = \"attachment;filename={0}\".format(filename)\n return csv_response\n\n def _write_rows(self, response, queryset):\n writer = csv.writer(response)\n\n # automatically add all fields from base table/model\n base_field_names = self._get_base_model_field_names(queryset)\n\n # print header row\n writer.writerow(base_field_names +\n [\n 'case id',\n 'case email',\n 'institution',\n 'institution id',\n 'monitoring id']\n )\n\n for obj in queryset:\n writer.writerow(\n [getattr(obj, field) for field in base_field_names] + [\n obj.case.id,\n obj.case.email,\n obj.case.institution.name,\n obj.case.institution_id,\n obj.case.monitoring_id,\n ])\n\nclass EmailLogCaseListView(ListMonitoringMixin, ListView):\n template_name_suffix = 
'_list_for_case'\n\n @cached_property\n def case(self):\n return get_object_or_404(Case.objects.select_related('monitoring'),\n pk=self.kwargs['case_pk'])\n\n @cached_property\n def monitoring(self):\n return self.case.monitoring\n\n def get_context_data(self, **kwargs):\n kwargs['case'] = self.case\n return super(EmailLogCaseListView, self).get_context_data(**kwargs)\n\n def get_queryset(self):\n return super(ListMonitoringMixin, self).get_queryset().filter(case=self.case)\n\n\nclass EmailLogDetailView(AttrPermissionRequiredMixin, PrefetchRelatedMixin,\n SelectRelatedMixin, DetailView):\n model = EmailLog\n select_related = ['case__monitoring']\n prefetch_related = ['logrecord_set']\n permission_attribute = 'case__monitoring'\n permission_required = 'monitorings.view_log'\n", "path": "feder/letters/logs/views.py"}, {"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import url\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom . import views\n\nurlpatterns = [\n url(_(r'^case-(?P<case_pk>[\\d-]+)$'), views.EmailLogCaseListView.as_view(),\n name=\"list\"),\n url(_(r'^monitoring-(?P<monitoring_pk>[\\d-]+)$'), views.EmailLogMonitoringListView.as_view(),\n name=\"list\"),\n url(_(r'^monitoring-(?P<monitoring_pk>[\\d-]+)/export$'), views.EmailLogMonitoringCsvView.as_view(),\n name=\"export\"),\n url(_(r'^log-(?P<pk>[\\d-]+)$'), views.EmailLogDetailView.as_view(),\n name=\"detail\"),\n]\n", "path": "feder/letters/logs/urls.py"}]} | 1,156 | 860 |
gh_patches_debug_14332 | rasdani/github-patches | git_diff | scikit-hep__pyhf-638 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Automate deployment to PyPI
# Description
According to @lukasheinrich, the current workflow for deploying to PyPI is:
```
git checkout master
git pull
bumpversion patch
git commit
git push origin master --tags
```
This is a bit annoyingly manual and ideally should be done automatically.
Luckily, there is an [official PyPA GitHub action](https://discuss.python.org/t/official-github-action-for-publishing-to-pypi/1061) to do this:
https://github.com/pypa/gh-action-pypi-publish
However, we need GitHub Actions for pyhf, so we have to wait.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 from os import path
3 import sys
4
5 this_directory = path.abspath(path.dirname(__file__))
6 if sys.version_info.major < 3:
7 from io import open
8 with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:
9 long_description = readme_md.read()
10
11 extras_require = {
12 'tensorflow': ['tensorflow~=1.15', 'tensorflow-probability~=0.8', 'numpy~=1.16',],
13 'torch': ['torch~=1.2'],
14 'xmlio': ['uproot'],
15 'minuit': ['iminuit'],
16 'develop': [
17 'pyflakes',
18 'pytest~=3.5',
19 'pytest-cov>=2.5.1',
20 'pytest-mock',
21 'pytest-benchmark[histogram]',
22 'pytest-console-scripts',
23 'pydocstyle',
24 'coverage>=4.0', # coveralls
25 'matplotlib',
26 'jupyter',
27 'nbdime',
28 'uproot~=3.3',
29 'papermill~=1.0',
30 'nteract-scrapbook~=0.2',
31 'graphviz',
32 'bumpversion',
33 'sphinx',
34 'sphinxcontrib-bibtex',
35 'sphinxcontrib-napoleon',
36 'sphinx_rtd_theme',
37 'nbsphinx',
38 'sphinx-issues',
39 'm2r',
40 'jsonpatch',
41 'ipython',
42 'pre-commit',
43 'black;python_version>="3.6"', # Black is Python3 only
44 'twine',
45 'check-manifest',
46 ],
47 }
48 extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
49
50
51 def _is_test_pypi():
52 """
53 Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and
54 set to true (c.f. .travis.yml)
55
56 The use_scm_version kwarg accepts a callable for the local_scheme
57 configuration parameter with argument "version". This can be replaced
58 with a lambda as the desired version structure is {next_version}.dev{distance}
59 c.f. https://github.com/pypa/setuptools_scm/#importing-in-setuppy
60
61 As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version
62 controlled through bumpversion is used.
63 """
64 from os import getenv
65
66 return (
67 {'local_scheme': lambda version: ''}
68 if getenv('TESTPYPI_UPLOAD') == 'true'
69 else False
70 )
71
72
73 setup(
74 name='pyhf',
75 version='0.2.0',
76 description='(partial) pure python histfactory implementation',
77 long_description=long_description,
78 long_description_content_type='text/markdown',
79 url='https://github.com/diana-hep/pyhf',
80 author='Lukas Heinrich, Matthew Feickert, Giordon Stark',
81 author_email='[email protected], [email protected], [email protected]',
82 license='Apache',
83 keywords='physics fitting numpy scipy tensorflow pytorch',
84 classifiers=[
85 "Programming Language :: Python :: 2",
86 "Programming Language :: Python :: 2.7",
87 "Programming Language :: Python :: 3",
88 "Programming Language :: Python :: 3.6",
89 "Programming Language :: Python :: 3.7",
90 ],
91 package_dir={'': 'src'},
92 packages=find_packages(where='src'),
93 include_package_data=True,
94 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*",
95 install_requires=[
96 'scipy', # requires numpy, which is required by pyhf and tensorflow
97 'click>=6.0', # for console scripts,
98 'tqdm', # for readxml
99 'six', # for modifiers
100 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6
101 'jsonpatch',
102 'pyyaml', # for parsing CLI equal-delimited options
103 ],
104 extras_require=extras_require,
105 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},
106 dependency_links=[],
107 use_scm_version=_is_test_pypi(),
108 )
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -50,8 +50,8 @@
def _is_test_pypi():
"""
- Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and
- set to true (c.f. .travis.yml)
+ Determine if the CI environment has IS_TESTPYPI defined and
+ set to true (c.f. .github/workflows/publish-package.yml)
The use_scm_version kwarg accepts a callable for the local_scheme
configuration parameter with argument "version". This can be replaced
@@ -65,7 +65,7 @@
return (
{'local_scheme': lambda version: ''}
- if getenv('TESTPYPI_UPLOAD') == 'true'
+ if getenv('IS_TESTPYPI') == 'true'
else False
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,8 +50,8 @@\n \n def _is_test_pypi():\n \"\"\"\n- Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n- set to true (c.f. .travis.yml)\n+ Determine if the CI environment has IS_TESTPYPI defined and\n+ set to true (c.f. .github/workflows/publish-package.yml)\n \n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n@@ -65,7 +65,7 @@\n \n return (\n {'local_scheme': lambda version: ''}\n- if getenv('TESTPYPI_UPLOAD') == 'true'\n+ if getenv('IS_TESTPYPI') == 'true'\n else False\n )\n", "issue": "Automate deployment to PyPI\n# Description\r\n\r\nAccording to @lukasheinrich, the current workflow for deploying to PyPI is:\r\n\r\n```\r\ngit checkout master\r\ngit pull\r\nbumpversion patch\r\ngit commit\r\ngit push origin master --tags\r\n```\r\n\r\nThis is a bit annoyingly manual and ideally should be done automatically.\r\n\r\nLuckily, there is an [official PyPA GitHub action](https://discuss.python.org/t/official-github-action-for-publishing-to-pypi/1061) to do this:\r\n\r\nhttps://github.com/pypa/gh-action-pypi-publish\r\n\r\nHowever, we need GitHub actions for pyhf, so we have to wait.\n", "before_files": [{"content": "from setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=1.15', 'tensorflow-probability~=0.8', 'numpy~=1.16',],\n 'torch': ['torch~=1.2'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython',\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n 'check-manifest',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the Travis CI environment has TESTPYPI_UPLOAD defined and\n set to true (c.f. .travis.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. 
https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('TESTPYPI_UPLOAD') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.2.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nfrom os import path\nimport sys\n\nthis_directory = path.abspath(path.dirname(__file__))\nif sys.version_info.major < 3:\n from io import open\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': ['tensorflow~=1.15', 'tensorflow-probability~=0.8', 'numpy~=1.16',],\n 'torch': ['torch~=1.2'],\n 'xmlio': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest~=3.5',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot~=3.3',\n 'papermill~=1.0',\n 'nteract-scrapbook~=0.2',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'sphinx-issues',\n 'm2r',\n 'jsonpatch',\n 'ipython',\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n 'check-manifest',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\ndef _is_test_pypi():\n \"\"\"\n Determine if the CI environment has IS_TESTPYPI defined and\n set to true (c.f. .github/workflows/publish-package.yml)\n\n The use_scm_version kwarg accepts a callable for the local_scheme\n configuration parameter with argument \"version\". This can be replaced\n with a lambda as the desired version structure is {next_version}.dev{distance}\n c.f. 
https://github.com/pypa/setuptools_scm/#importing-in-setuppy\n\n As the scm versioning is only desired for TestPyPI, for depolyment to PyPI the version\n controlled through bumpversion is used.\n \"\"\"\n from os import getenv\n\n return (\n {'local_scheme': lambda version: ''}\n if getenv('IS_TESTPYPI') == 'true'\n else False\n )\n\n\nsetup(\n name='pyhf',\n version='0.2.0',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich, Matthew Feickert, Giordon Stark',\n author_email='[email protected], [email protected], [email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf and tensorflow\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n 'pyyaml', # for parsing CLI equal-delimited options\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n use_scm_version=_is_test_pypi(),\n)\n", "path": "setup.py"}]} | 1,573 | 194 |
gh_patches_debug_59597 | rasdani/github-patches | git_diff | googleapis__python-bigquery-587 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
loosen opentelemetry dependencies
See Spanner PR: https://github.com/googleapis/python-spanner/pull/298
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import io
16 import os
17
18 import setuptools
19
20
21 # Package metadata.
22
23 name = "google-cloud-bigquery"
24 description = "Google BigQuery API client library"
25
26 # Should be one of:
27 # 'Development Status :: 3 - Alpha'
28 # 'Development Status :: 4 - Beta'
29 # 'Development Status :: 5 - Production/Stable'
30 release_status = "Development Status :: 5 - Production/Stable"
31 dependencies = [
32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev",
33 "proto-plus >= 1.10.0",
34 "google-cloud-core >= 1.4.1, < 2.0dev",
35 "google-resumable-media >= 0.6.0, < 2.0dev",
36 "packaging >= 14.3",
37 "protobuf >= 3.12.0",
38 "requests >= 2.18.0, < 3.0.0dev",
39 ]
40 extras = {
41 "bqstorage": [
42 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev",
43 # Due to an issue in pip's dependency resolver, the `grpc` extra is not
44 # installed, even though `google-cloud-bigquery-storage` specifies it
45 # as `google-api-core[grpc]`. We thus need to explicitly specify it here.
46 # See: https://github.com/googleapis/python-bigquery/issues/83 The
47 # grpc.Channel.close() method isn't added until 1.32.0.
48 # https://github.com/grpc/grpc/pull/15254
49 "grpcio >= 1.32.0, < 2.0dev",
50 "pyarrow >= 1.0.0, < 4.0dev",
51 ],
52 "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"],
53 "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"],
54 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
55 "opentelemetry": [
56 "opentelemetry-api==0.11b0",
57 "opentelemetry-sdk==0.11b0",
58 "opentelemetry-instrumentation==0.11b0",
59 ],
60 }
61
62 all_extras = []
63
64 for extra in extras:
65 # Exclude this extra from all to avoid overly strict dependencies on core
66 # libraries such as pyarrow.
67 # https://github.com/googleapis/python-bigquery/issues/563
68 if extra in {"bignumeric_type"}:
69 continue
70 all_extras.extend(extras[extra])
71
72 extras["all"] = all_extras
73
74 # Setup boilerplate below this line.
75
76 package_root = os.path.abspath(os.path.dirname(__file__))
77
78 readme_filename = os.path.join(package_root, "README.rst")
79 with io.open(readme_filename, encoding="utf-8") as readme_file:
80 readme = readme_file.read()
81
82 version = {}
83 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp:
84 exec(fp.read(), version)
85 version = version["__version__"]
86
87 # Only include packages under the 'google' namespace. Do not include tests,
88 # benchmarks, etc.
89 packages = [
90 package
91 for package in setuptools.PEP420PackageFinder.find()
92 if package.startswith("google")
93 ]
94
95 # Determine which namespaces are needed.
96 namespaces = ["google"]
97 if "google.cloud" in packages:
98 namespaces.append("google.cloud")
99
100
101 setuptools.setup(
102 name=name,
103 version=version,
104 description=description,
105 long_description=readme,
106 author="Google LLC",
107 author_email="[email protected]",
108 license="Apache 2.0",
109 url="https://github.com/googleapis/python-bigquery",
110 classifiers=[
111 release_status,
112 "Intended Audience :: Developers",
113 "License :: OSI Approved :: Apache Software License",
114 "Programming Language :: Python",
115 "Programming Language :: Python :: 3",
116 "Programming Language :: Python :: 3.6",
117 "Programming Language :: Python :: 3.7",
118 "Programming Language :: Python :: 3.8",
119 "Programming Language :: Python :: 3.9",
120 "Operating System :: OS Independent",
121 "Topic :: Internet",
122 ],
123 platforms="Posix; MacOS X; Windows",
124 packages=packages,
125 namespace_packages=namespaces,
126 install_requires=dependencies,
127 extras_require=extras,
128 python_requires=">=3.6, <3.10",
129 include_package_data=True,
130 zip_safe=False,
131 )
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -53,9 +53,9 @@
"bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"],
"tqdm": ["tqdm >= 4.7.4, <5.0.0dev"],
"opentelemetry": [
- "opentelemetry-api==0.11b0",
- "opentelemetry-sdk==0.11b0",
- "opentelemetry-instrumentation==0.11b0",
+ "opentelemetry-api >= 0.11b0",
+ "opentelemetry-sdk >= 0.11b0",
+ "opentelemetry-instrumentation >= 0.11b0",
],
}
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -53,9 +53,9 @@\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n- \"opentelemetry-api==0.11b0\",\n- \"opentelemetry-sdk==0.11b0\",\n- \"opentelemetry-instrumentation==0.11b0\",\n+ \"opentelemetry-api >= 0.11b0\",\n+ \"opentelemetry-sdk >= 0.11b0\",\n+ \"opentelemetry-instrumentation >= 0.11b0\",\n ],\n }\n", "issue": "loosen opentelemetry dependencies\nSee Spanner PR: https://github.com/googleapis/python-spanner/pull/298\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api==0.11b0\",\n \"opentelemetry-sdk==0.11b0\",\n \"opentelemetry-instrumentation==0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. 
We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n \"opentelemetry-sdk >= 0.11b0\",\n \"opentelemetry-instrumentation >= 0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,740 | 190 |
gh_patches_debug_14536 | rasdani/github-patches | git_diff | mozmeao__snippets-service-864 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Filter by release channel on ASRSnippets raises an error
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snippets/base/admin/filters.py`
Content:
```
1 from datetime import datetime, timedelta
2
3 from django.contrib import admin
4 from django.utils.encoding import force_text
5
6
7 class ModifiedFilter(admin.SimpleListFilter):
8 title = 'Last modified'
9 parameter_name = 'last_modified'
10
11 def lookups(self, request, model_admin):
12 return (
13 ('24', '24 hours'),
14 ('168', '7 days'),
15 ('336', '14 days'),
16 ('720', '30 days'),
17 ('all', 'All'),
18 )
19
20 def queryset(self, request, queryset):
21 value = self.value()
22 if not value or value == 'all':
23 return queryset
24
25 when = datetime.utcnow() - timedelta(hours=int(value))
26 return queryset.exclude(modified__lt=when)
27
28 def choices(self, cl):
29 for lookup, title in self.lookup_choices:
30 yield {
31 'selected': self.value() == force_text(lookup),
32 'query_string': cl.get_query_string({
33 self.parameter_name: lookup,
34 }, []),
35 'display': title,
36 }
37
38
39 class ChannelFilter(admin.SimpleListFilter):
40 title = 'Channel'
41 parameter_name = 'channel'
42
43 def lookups(self, request, model_admin):
44 return (
45 ('on_release', 'Release'),
46 ('on_esr', 'ESR'),
47 ('on_beta', 'Beta'),
48 ('on_aurora', 'Dev (Aurora)'),
49 ('on_nightly', 'Nightly'),
50 )
51
52 def queryset(self, request, queryset):
53 if self.value() is None:
54 return queryset
55
56 return queryset.filter(**{self.value(): True})
57
58
59 class ActivityStreamFilter(admin.SimpleListFilter):
60 title = 'Activity Stream'
61 parameter_name = 'is_activity_stream'
62
63 def lookups(self, request, model_admin):
64 return (
65 ('yes', 'Yes'),
66 ('no', 'No'),
67 )
68
69 def queryset(self, request, queryset):
70 if self.value() is None:
71 return queryset
72 elif self.value() == 'yes':
73 return queryset.filter(on_startpage_5=True)
74 elif self.value() == 'no':
75 return queryset.exclude(on_startpage_5=True)
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snippets/base/admin/filters.py b/snippets/base/admin/filters.py
--- a/snippets/base/admin/filters.py
+++ b/snippets/base/admin/filters.py
@@ -3,6 +3,8 @@
from django.contrib import admin
from django.utils.encoding import force_text
+from snippets.base.managers import SnippetQuerySet
+
class ModifiedFilter(admin.SimpleListFilter):
title = 'Last modified'
@@ -53,7 +55,9 @@
if self.value() is None:
return queryset
- return queryset.filter(**{self.value(): True})
+ if isinstance(queryset, SnippetQuerySet):
+ return queryset.filter(**{self.value(): True})
+ return queryset.filter(**{f'target__{self.value()}': True})
class ActivityStreamFilter(admin.SimpleListFilter):
| {"golden_diff": "diff --git a/snippets/base/admin/filters.py b/snippets/base/admin/filters.py\n--- a/snippets/base/admin/filters.py\n+++ b/snippets/base/admin/filters.py\n@@ -3,6 +3,8 @@\n from django.contrib import admin\n from django.utils.encoding import force_text\n \n+from snippets.base.managers import SnippetQuerySet\n+\n \n class ModifiedFilter(admin.SimpleListFilter):\n title = 'Last modified'\n@@ -53,7 +55,9 @@\n if self.value() is None:\n return queryset\n \n- return queryset.filter(**{self.value(): True})\n+ if isinstance(queryset, SnippetQuerySet):\n+ return queryset.filter(**{self.value(): True})\n+ return queryset.filter(**{f'target__{self.value()}': True})\n \n \n class ActivityStreamFilter(admin.SimpleListFilter):\n", "issue": "Filter by release channel on ASRSnippets raises an error\n\n", "before_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib import admin\nfrom django.utils.encoding import force_text\n\n\nclass ModifiedFilter(admin.SimpleListFilter):\n title = 'Last modified'\n parameter_name = 'last_modified'\n\n def lookups(self, request, model_admin):\n return (\n ('24', '24 hours'),\n ('168', '7 days'),\n ('336', '14 days'),\n ('720', '30 days'),\n ('all', 'All'),\n )\n\n def queryset(self, request, queryset):\n value = self.value()\n if not value or value == 'all':\n return queryset\n\n when = datetime.utcnow() - timedelta(hours=int(value))\n return queryset.exclude(modified__lt=when)\n\n def choices(self, cl):\n for lookup, title in self.lookup_choices:\n yield {\n 'selected': self.value() == force_text(lookup),\n 'query_string': cl.get_query_string({\n self.parameter_name: lookup,\n }, []),\n 'display': title,\n }\n\n\nclass ChannelFilter(admin.SimpleListFilter):\n title = 'Channel'\n parameter_name = 'channel'\n\n def lookups(self, request, model_admin):\n return (\n ('on_release', 'Release'),\n ('on_esr', 'ESR'),\n ('on_beta', 'Beta'),\n ('on_aurora', 'Dev (Aurora)'),\n ('on_nightly', 'Nightly'),\n )\n\n def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n\n return queryset.filter(**{self.value(): True})\n\n\nclass ActivityStreamFilter(admin.SimpleListFilter):\n title = 'Activity Stream'\n parameter_name = 'is_activity_stream'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', 'Yes'),\n ('no', 'No'),\n )\n\n def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n elif self.value() == 'yes':\n return queryset.filter(on_startpage_5=True)\n elif self.value() == 'no':\n return queryset.exclude(on_startpage_5=True)\n", "path": "snippets/base/admin/filters.py"}], "after_files": [{"content": "from datetime import datetime, timedelta\n\nfrom django.contrib import admin\nfrom django.utils.encoding import force_text\n\nfrom snippets.base.managers import SnippetQuerySet\n\n\nclass ModifiedFilter(admin.SimpleListFilter):\n title = 'Last modified'\n parameter_name = 'last_modified'\n\n def lookups(self, request, model_admin):\n return (\n ('24', '24 hours'),\n ('168', '7 days'),\n ('336', '14 days'),\n ('720', '30 days'),\n ('all', 'All'),\n )\n\n def queryset(self, request, queryset):\n value = self.value()\n if not value or value == 'all':\n return queryset\n\n when = datetime.utcnow() - timedelta(hours=int(value))\n return queryset.exclude(modified__lt=when)\n\n def choices(self, cl):\n for lookup, title in self.lookup_choices:\n yield {\n 'selected': self.value() == force_text(lookup),\n 'query_string': cl.get_query_string({\n self.parameter_name: 
lookup,\n }, []),\n 'display': title,\n }\n\n\nclass ChannelFilter(admin.SimpleListFilter):\n title = 'Channel'\n parameter_name = 'channel'\n\n def lookups(self, request, model_admin):\n return (\n ('on_release', 'Release'),\n ('on_esr', 'ESR'),\n ('on_beta', 'Beta'),\n ('on_aurora', 'Dev (Aurora)'),\n ('on_nightly', 'Nightly'),\n )\n\n def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n\n if isinstance(queryset, SnippetQuerySet):\n return queryset.filter(**{self.value(): True})\n return queryset.filter(**{f'target__{self.value()}': True})\n\n\nclass ActivityStreamFilter(admin.SimpleListFilter):\n title = 'Activity Stream'\n parameter_name = 'is_activity_stream'\n\n def lookups(self, request, model_admin):\n return (\n ('yes', 'Yes'),\n ('no', 'No'),\n )\n\n def queryset(self, request, queryset):\n if self.value() is None:\n return queryset\n elif self.value() == 'yes':\n return queryset.filter(on_startpage_5=True)\n elif self.value() == 'no':\n return queryset.exclude(on_startpage_5=True)\n", "path": "snippets/base/admin/filters.py"}]} | 890 | 181 |
gh_patches_debug_21120 | rasdani/github-patches | git_diff | chainer__chainer-242 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add type check to NonparameterizedLinear function
Related to #123
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/nonparameterized_linear.py`
Content:
```
1 from chainer import cuda
2 from chainer import function
3 from chainer.functions import linear as linear_module
4
5
6 class NonparameterizedLinear(function.Function):
7
8 """Nonparameterized linear class.
9
10 .. seealso:: :class:`Linear`
11
12 """
13
14 def forward(self, x):
15 W = x[1]
16 b = None
17 if len(x) == 3:
18 b = x[2]
19 out_size, in_size = W.shape
20 func = linear_module.Linear(
21 in_size, out_size, initialW=W, initial_bias=b)
22 self.func = func
23 if any(isinstance(i, cuda.GPUArray) for i in x):
24 func.to_gpu()
25 return func.forward(x[:1])
26
27 def backward(self, x, gy):
28 func = self.func
29 func.zero_grads()
30 gx = func.backward(x[:1], gy)
31 if func.gb is None:
32 return (gx[0], func.gW)
33 return (gx[0], func.gW, func.gb)
34
35
36 def linear(x, W, b=None, stride=1, pad=0, use_cudnn=True):
37 """Nonparameterized linear function.
38
39 Args:
40 x (~chainer.Variable): Input variable.
41 W (~chainer.Variable): Weight variable.
42 b (~chainer.Variable): Bias variable.
43
44 Returns:
45 ~chainer.Variable: Output variable.
46
47 .. seealso:: :class:`Linear`
48
49 """
50
51 return NonparameterizedLinear()(x, W, b)
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/nonparameterized_linear.py b/chainer/functions/nonparameterized_linear.py
--- a/chainer/functions/nonparameterized_linear.py
+++ b/chainer/functions/nonparameterized_linear.py
@@ -1,6 +1,9 @@
+import numpy
+
from chainer import cuda
from chainer import function
from chainer.functions import linear as linear_module
+from chainer.utils import type_check
class NonparameterizedLinear(function.Function):
@@ -11,6 +14,29 @@
"""
+ def check_type_forward(self, in_types):
+ type_check.expect(
+ 2 <= in_types.size(),
+ in_types.size() <= 3,
+ )
+ x_type = in_types[0]
+ w_type = in_types[1]
+
+ prod = type_check.Variable(numpy.prod, 'prod')
+ type_check.expect(
+ x_type.dtype == numpy.float32,
+ w_type.dtype == numpy.float32,
+ x_type.ndim >= 2,
+ w_type.ndim == 2,
+ prod(x_type.shape[1:]) == w_type.shape[1],
+ )
+ if in_types.size().eval() == 3:
+ b_type = in_types[2]
+ type_check.expect(
+ b_type.ndim == 1,
+ b_type.shape[0] == w_type.shape[0],
+ )
+
def forward(self, x):
W = x[1]
b = None
| {"golden_diff": "diff --git a/chainer/functions/nonparameterized_linear.py b/chainer/functions/nonparameterized_linear.py\n--- a/chainer/functions/nonparameterized_linear.py\n+++ b/chainer/functions/nonparameterized_linear.py\n@@ -1,6 +1,9 @@\n+import numpy\n+\n from chainer import cuda\n from chainer import function\n from chainer.functions import linear as linear_module\n+from chainer.utils import type_check\n \n \n class NonparameterizedLinear(function.Function):\n@@ -11,6 +14,29 @@\n \n \"\"\"\n \n+ def check_type_forward(self, in_types):\n+ type_check.expect(\n+ 2 <= in_types.size(),\n+ in_types.size() <= 3,\n+ )\n+ x_type = in_types[0]\n+ w_type = in_types[1]\n+\n+ prod = type_check.Variable(numpy.prod, 'prod')\n+ type_check.expect(\n+ x_type.dtype == numpy.float32,\n+ w_type.dtype == numpy.float32,\n+ x_type.ndim >= 2,\n+ w_type.ndim == 2,\n+ prod(x_type.shape[1:]) == w_type.shape[1],\n+ )\n+ if in_types.size().eval() == 3:\n+ b_type = in_types[2]\n+ type_check.expect(\n+ b_type.ndim == 1,\n+ b_type.shape[0] == w_type.shape[0],\n+ )\n+\n def forward(self, x):\n W = x[1]\n b = None\n", "issue": "Add type check to NonparameterizedLinear function\nRelated to #123\n\n", "before_files": [{"content": "from chainer import cuda\nfrom chainer import function\nfrom chainer.functions import linear as linear_module\n\n\nclass NonparameterizedLinear(function.Function):\n\n \"\"\"Nonparameterized linear class.\n\n .. seealso:: :class:`Linear`\n\n \"\"\"\n\n def forward(self, x):\n W = x[1]\n b = None\n if len(x) == 3:\n b = x[2]\n out_size, in_size = W.shape\n func = linear_module.Linear(\n in_size, out_size, initialW=W, initial_bias=b)\n self.func = func\n if any(isinstance(i, cuda.GPUArray) for i in x):\n func.to_gpu()\n return func.forward(x[:1])\n\n def backward(self, x, gy):\n func = self.func\n func.zero_grads()\n gx = func.backward(x[:1], gy)\n if func.gb is None:\n return (gx[0], func.gW)\n return (gx[0], func.gW, func.gb)\n\n\ndef linear(x, W, b=None, stride=1, pad=0, use_cudnn=True):\n \"\"\"Nonparameterized linear function.\n\n Args:\n x (~chainer.Variable): Input variable.\n W (~chainer.Variable): Weight variable.\n b (~chainer.Variable): Bias variable.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. seealso:: :class:`Linear`\n\n \"\"\"\n\n return NonparameterizedLinear()(x, W, b)\n", "path": "chainer/functions/nonparameterized_linear.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.functions import linear as linear_module\nfrom chainer.utils import type_check\n\n\nclass NonparameterizedLinear(function.Function):\n\n \"\"\"Nonparameterized linear class.\n\n .. 
seealso:: :class:`Linear`\n\n \"\"\"\n\n def check_type_forward(self, in_types):\n type_check.expect(\n 2 <= in_types.size(),\n in_types.size() <= 3,\n )\n x_type = in_types[0]\n w_type = in_types[1]\n\n prod = type_check.Variable(numpy.prod, 'prod')\n type_check.expect(\n x_type.dtype == numpy.float32,\n w_type.dtype == numpy.float32,\n x_type.ndim >= 2,\n w_type.ndim == 2,\n prod(x_type.shape[1:]) == w_type.shape[1],\n )\n if in_types.size().eval() == 3:\n b_type = in_types[2]\n type_check.expect(\n b_type.ndim == 1,\n b_type.shape[0] == w_type.shape[0],\n )\n\n def forward(self, x):\n W = x[1]\n b = None\n if len(x) == 3:\n b = x[2]\n out_size, in_size = W.shape\n func = linear_module.Linear(\n in_size, out_size, initialW=W, initial_bias=b)\n self.func = func\n if any(isinstance(i, cuda.GPUArray) for i in x):\n func.to_gpu()\n return func.forward(x[:1])\n\n def backward(self, x, gy):\n func = self.func\n func.zero_grads()\n gx = func.backward(x[:1], gy)\n if func.gb is None:\n return (gx[0], func.gW)\n return (gx[0], func.gW, func.gb)\n\n\ndef linear(x, W, b=None, stride=1, pad=0, use_cudnn=True):\n \"\"\"Nonparameterized linear function.\n\n Args:\n x (~chainer.Variable): Input variable.\n W (~chainer.Variable): Weight variable.\n b (~chainer.Variable): Bias variable.\n\n Returns:\n ~chainer.Variable: Output variable.\n\n .. seealso:: :class:`Linear`\n\n \"\"\"\n\n return NonparameterizedLinear()(x, W, b)\n", "path": "chainer/functions/nonparameterized_linear.py"}]} | 705 | 330 |
gh_patches_debug_63158 | rasdani/github-patches | git_diff | dotkom__onlineweb4-2101 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Users should be able to edit expired 'careeropportunity' from Dashboard
## What kind of an issue is this?
- Feature request
## What is the expected behaviour?
You should be able to click to edit from the list of expired careeropportunities in the Dashboard.
## Other information
This was requested by one of our users on email.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `apps/careeropportunity/dashboard/views.py`
Content:
```
1 # -*- encoding: utf-8 -*-
2 import logging
3
4 from django.contrib import messages
5 from django.contrib.auth.decorators import login_required
6 from django.core.exceptions import PermissionDenied
7 from django.shortcuts import get_object_or_404, redirect, render
8 from django.utils import timezone
9 from guardian.decorators import permission_required
10
11 from apps.careeropportunity.forms import AddCareerOpportunityForm
12 from apps.careeropportunity.models import CareerOpportunity
13 from apps.dashboard.tools import get_base_context, has_access
14
15
16 @login_required
17 @permission_required('careeropportunity.view_careeropportunity', return_403=True)
18 def index(request):
19
20 if not has_access(request):
21 raise PermissionDenied
22
23 context = get_base_context(request)
24
25 # "cops" is short for "careeropportunities" which is a fucking long word
26 # "cop" is short for "careeropportunity" which also is a fucking long word
27 cops = CareerOpportunity.objects.all()
28 context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')
29 context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')
30
31 return render(request, 'careeropportunity/dashboard/index.html', context)
32
33
34 @login_required
35 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
36 def detail(request, opportunity_id=None):
37 logger = logging.getLogger(__name__)
38 logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))
39
40 if not has_access(request):
41 raise PermissionDenied
42
43 context = get_base_context(request)
44 cop = None
45 if opportunity_id:
46 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
47 context['cop'] = cop
48 context['form'] = AddCareerOpportunityForm(instance=cop)
49 else:
50 context['form'] = AddCareerOpportunityForm()
51
52 if request.method == 'POST':
53 if cop:
54 form = AddCareerOpportunityForm(data=request.POST, instance=cop)
55 else:
56 form = AddCareerOpportunityForm(data=request.POST)
57
58 if form.is_valid():
59 form.save()
60 messages.success(request, 'La til ny karrieremulighet')
61 return redirect(index)
62 else:
63 context['form'] = form
64 messages.error(request,
65 'Skjemaet ble ikke korrekt utfylt. Se etter markerte felter for å se hva som gikk galt.')
66
67 return render(request, 'careeropportunity/dashboard/detail.html', context)
68
69
70 @login_required
71 @permission_required('careeropportunity.change_careeropportunity', return_403=True)
72 def delete(request, opportunity_id=None):
73 logger = logging.getLogger(__name__)
 74     logger.debug('Deleting careeropportunity with id: %s' % (opportunity_id))
75 if not has_access(request):
76 raise PermissionDenied
77
78 cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)
79 cop.delete()
80 messages.success(request, 'Slettet karrieremuligheten')
81 return redirect(index)
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py
--- a/apps/careeropportunity/dashboard/views.py
+++ b/apps/careeropportunity/dashboard/views.py
@@ -27,7 +27,7 @@
cops = CareerOpportunity.objects.all()
context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')
context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')
-
+ context['all'] = cops
return render(request, 'careeropportunity/dashboard/index.html', context)
| {"golden_diff": "diff --git a/apps/careeropportunity/dashboard/views.py b/apps/careeropportunity/dashboard/views.py\n--- a/apps/careeropportunity/dashboard/views.py\n+++ b/apps/careeropportunity/dashboard/views.py\n@@ -27,7 +27,7 @@\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n-\n+ context['all'] = cops\n return render(request, 'careeropportunity/dashboard/index.html', context)\n", "issue": "Users should be able to edit expired 'careeropportunity' from Dashboard\n## What kind of an issue is this?\r\n- Feature request\r\n\r\n## What is the expected behaviour?\r\n\r\nYou should be able to click to edit from the list of expired careeropportunities in the Dashboard.\r\n\r\n## Other information\r\n\r\nThis was requested by one of our users on email.\r\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom guardian.decorators import permission_required\n\nfrom apps.careeropportunity.forms import AddCareerOpportunityForm\nfrom apps.careeropportunity.models import CareerOpportunity\nfrom apps.dashboard.tools import get_base_context, has_access\n\n\n@login_required\n@permission_required('careeropportunity.view_careeropportunity', return_403=True)\ndef index(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n # \"cops\" is short for \"careeropportunities\" which is a fucking long word\n # \"cop\" is short for \"careeropportunity\" which also is a fucking long word\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n\n return render(request, 'careeropportunity/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef detail(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n cop = None\n if opportunity_id:\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n context['cop'] = cop\n context['form'] = AddCareerOpportunityForm(instance=cop)\n else:\n context['form'] = AddCareerOpportunityForm()\n\n if request.method == 'POST':\n if cop:\n form = AddCareerOpportunityForm(data=request.POST, instance=cop)\n else:\n form = AddCareerOpportunityForm(data=request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'La til ny karrieremulighet')\n return redirect(index)\n else:\n context['form'] = form\n messages.error(request,\n 'Skjemaet ble ikke korrekt utfylt. 
Se etter markerte felter for \u00e5 se hva som gikk galt.')\n\n return render(request, 'careeropportunity/dashboard/detail.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef delete(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))\n if not has_access(request):\n raise PermissionDenied\n\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n cop.delete()\n messages.success(request, 'Slettet karrieremuligheten')\n return redirect(index)\n", "path": "apps/careeropportunity/dashboard/views.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\nimport logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import timezone\nfrom guardian.decorators import permission_required\n\nfrom apps.careeropportunity.forms import AddCareerOpportunityForm\nfrom apps.careeropportunity.models import CareerOpportunity\nfrom apps.dashboard.tools import get_base_context, has_access\n\n\n@login_required\n@permission_required('careeropportunity.view_careeropportunity', return_403=True)\ndef index(request):\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n\n # \"cops\" is short for \"careeropportunities\" which is a fucking long word\n # \"cop\" is short for \"careeropportunity\" which also is a fucking long word\n cops = CareerOpportunity.objects.all()\n context['cops'] = cops.filter(end__gte=timezone.now()).order_by('end')\n context['archive'] = cops.filter(end__lte=timezone.now()).order_by('-id')\n context['all'] = cops\n return render(request, 'careeropportunity/dashboard/index.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef detail(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Editing careeropportunity with id: %s' % (opportunity_id))\n\n if not has_access(request):\n raise PermissionDenied\n\n context = get_base_context(request)\n cop = None\n if opportunity_id:\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n context['cop'] = cop\n context['form'] = AddCareerOpportunityForm(instance=cop)\n else:\n context['form'] = AddCareerOpportunityForm()\n\n if request.method == 'POST':\n if cop:\n form = AddCareerOpportunityForm(data=request.POST, instance=cop)\n else:\n form = AddCareerOpportunityForm(data=request.POST)\n\n if form.is_valid():\n form.save()\n messages.success(request, 'La til ny karrieremulighet')\n return redirect(index)\n else:\n context['form'] = form\n messages.error(request,\n 'Skjemaet ble ikke korrekt utfylt. 
Se etter markerte felter for \u00e5 se hva som gikk galt.')\n\n return render(request, 'careeropportunity/dashboard/detail.html', context)\n\n\n@login_required\n@permission_required('careeropportunity.change_careeropportunity', return_403=True)\ndef delete(request, opportunity_id=None):\n logger = logging.getLogger(__name__)\n logger.debug('Deleting careeropportunitywith id: %s' % (opportunity_id))\n if not has_access(request):\n raise PermissionDenied\n\n cop = get_object_or_404(CareerOpportunity, pk=opportunity_id)\n cop.delete()\n messages.success(request, 'Slettet karrieremuligheten')\n return redirect(index)\n", "path": "apps/careeropportunity/dashboard/views.py"}]} | 1,174 | 135 |
gh_patches_debug_12009 | rasdani/github-patches | git_diff | Netflix__lemur-111 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Duplicate Plugins Listed
Plugins are duplicated in the authority dropdown.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lemur/plugins/views.py`
Content:
```
1 """
2 .. module: lemur.plugins.views
3 :platform: Unix
4 :synopsis: This module contains all of the accounts view code.
5 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
6 :license: Apache, see LICENSE for more details.
7 .. moduleauthor:: Kevin Glisson <[email protected]>
8 """
9 from flask import Blueprint
10 from flask.ext.restful import Api, reqparse, fields
11 from lemur.auth.service import AuthenticatedResource
12
13 from lemur.common.utils import marshal_items
14
15 from lemur.plugins.base import plugins
16
17 mod = Blueprint('plugins', __name__)
18 api = Api(mod)
19
20
21 FIELDS = {
22 'title': fields.String,
23 'pluginOptions': fields.Raw(attribute='options'),
24 'description': fields.String,
25 'version': fields.String,
26 'author': fields.String,
27 'authorUrl': fields.String,
28 'type': fields.String,
29 'slug': fields.String,
30 }
31
32
33 class PluginsList(AuthenticatedResource):
34 """ Defines the 'plugins' endpoint """
35 def __init__(self):
36 self.reqparse = reqparse.RequestParser()
37 super(PluginsList, self).__init__()
38
39 @marshal_items(FIELDS)
40 def get(self):
41 """
42 .. http:get:: /plugins
43
44 The current plugin list
45
46 **Example request**:
47
48 .. sourcecode:: http
49
50 GET /plugins HTTP/1.1
51 Host: example.com
52 Accept: application/json, text/javascript
53
54 **Example response**:
55
56 .. sourcecode:: http
57
58 HTTP/1.1 200 OK
59 Vary: Accept
60 Content-Type: text/javascript
61
62 {
63 "items": [
64 {
65 "id": 2,
66 "accountNumber": 222222222,
67 "label": "account2",
68 "description": "this is a thing"
69 },
70 {
71 "id": 1,
72 "accountNumber": 11111111111,
73 "label": "account1",
74 "description": "this is a thing"
75 },
76 ]
77 "total": 2
78 }
79
80 :reqheader Authorization: OAuth token to authenticate
81 :statuscode 200: no error
82 """
83 self.reqparse.add_argument('type', type=str, location='args')
84 args = self.reqparse.parse_args()
85
86 if args['type']:
87 return list(plugins.all(plugin_type=args['type']))
88
89 return plugins.all()
90
91
92 class Plugins(AuthenticatedResource):
 93     """ Defines the 'plugins' endpoint """
94 def __init__(self):
95 super(Plugins, self).__init__()
96
97 @marshal_items(FIELDS)
98 def get(self, name):
99 """
100 .. http:get:: /plugins/<name>
101
102 The current plugin list
103
104 **Example request**:
105
106 .. sourcecode:: http
107
108 GET /plugins HTTP/1.1
109 Host: example.com
110 Accept: application/json, text/javascript
111
112 **Example response**:
113
114 .. sourcecode:: http
115
116 HTTP/1.1 200 OK
117 Vary: Accept
118 Content-Type: text/javascript
119
120 {
121 "accountNumber": 222222222,
122 "label": "account2",
123 "description": "this is a thing"
124 }
125
126 :reqheader Authorization: OAuth token to authenticate
127 :statuscode 200: no error
128 """
129 return plugins.get(name)
130
131
132 api.add_resource(PluginsList, '/plugins', endpoint='plugins')
133 api.add_resource(Plugins, '/plugins/<name>', endpoint='pluginName')
134
```
Path: `lemur/common/managers.py`
Content:
```
1 """
2 .. module: lemur.common.managers
3 :platform: Unix
4 :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more
5 :license: Apache, see LICENSE for more details.
6
7 .. moduleauthor:: Kevin Glisson <[email protected]>
8 """
9 from flask import current_app
10
11
12 # inspired by https://github.com/getsentry/sentry
13 class InstanceManager(object):
14 def __init__(self, class_list=None, instances=True):
15 if class_list is None:
16 class_list = []
17 self.instances = instances
18 self.update(class_list)
19
20 def get_class_list(self):
21 return self.class_list
22
23 def add(self, class_path):
24 self.cache = None
25 self.class_list.append(class_path)
26
27 def remove(self, class_path):
28 self.cache = None
29 self.class_list.remove(class_path)
30
31 def update(self, class_list):
32 """
33 Updates the class list and wipes the cache.
34 """
35 self.cache = None
36 self.class_list = class_list
37
38 def all(self):
39 """
40 Returns a list of cached instances.
41 """
42 class_list = list(self.get_class_list())
43 if not class_list:
44 self.cache = []
45 return []
46
47 if self.cache is not None:
48 return self.cache
49
50 results = []
51 for cls_path in class_list:
52 module_name, class_name = cls_path.rsplit('.', 1)
53 try:
54 module = __import__(module_name, {}, {}, class_name)
55 cls = getattr(module, class_name)
56 if self.instances:
57 results.append(cls())
58 else:
59 results.append(cls)
60 except Exception:
61 current_app.logger.exception('Unable to import %s', cls_path)
62 continue
63 self.cache = results
64
65 return results
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lemur/common/managers.py b/lemur/common/managers.py
--- a/lemur/common/managers.py
+++ b/lemur/common/managers.py
@@ -22,7 +22,8 @@
def add(self, class_path):
self.cache = None
- self.class_list.append(class_path)
+ if class_path not in self.class_list:
+ self.class_list.append(class_path)
def remove(self, class_path):
self.cache = None
diff --git a/lemur/plugins/views.py b/lemur/plugins/views.py
--- a/lemur/plugins/views.py
+++ b/lemur/plugins/views.py
@@ -86,7 +86,7 @@
if args['type']:
return list(plugins.all(plugin_type=args['type']))
- return plugins.all()
+ return list(plugins.all())
class Plugins(AuthenticatedResource):
| {"golden_diff": "diff --git a/lemur/common/managers.py b/lemur/common/managers.py\n--- a/lemur/common/managers.py\n+++ b/lemur/common/managers.py\n@@ -22,7 +22,8 @@\n \n def add(self, class_path):\n self.cache = None\n- self.class_list.append(class_path)\n+ if class_path not in self.class_list:\n+ self.class_list.append(class_path)\n \n def remove(self, class_path):\n self.cache = None\ndiff --git a/lemur/plugins/views.py b/lemur/plugins/views.py\n--- a/lemur/plugins/views.py\n+++ b/lemur/plugins/views.py\n@@ -86,7 +86,7 @@\n if args['type']:\n return list(plugins.all(plugin_type=args['type']))\n \n- return plugins.all()\n+ return list(plugins.all())\n \n \n class Plugins(AuthenticatedResource):\n", "issue": "Duplicate Plugins Listed\nPlugins are duplicated in the authority dropdown.\n\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.plugins.views\n :platform: Unix\n :synopsis: This module contains all of the accounts view code.\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom flask import Blueprint\nfrom flask.ext.restful import Api, reqparse, fields\nfrom lemur.auth.service import AuthenticatedResource\n\nfrom lemur.common.utils import marshal_items\n\nfrom lemur.plugins.base import plugins\n\nmod = Blueprint('plugins', __name__)\napi = Api(mod)\n\n\nFIELDS = {\n 'title': fields.String,\n 'pluginOptions': fields.Raw(attribute='options'),\n 'description': fields.String,\n 'version': fields.String,\n 'author': fields.String,\n 'authorUrl': fields.String,\n 'type': fields.String,\n 'slug': fields.String,\n}\n\n\nclass PluginsList(AuthenticatedResource):\n \"\"\" Defines the 'plugins' endpoint \"\"\"\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n super(PluginsList, self).__init__()\n\n @marshal_items(FIELDS)\n def get(self):\n \"\"\"\n .. http:get:: /plugins\n\n The current plugin list\n\n **Example request**:\n\n .. sourcecode:: http\n\n GET /plugins HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/javascript\n\n {\n \"items\": [\n {\n \"id\": 2,\n \"accountNumber\": 222222222,\n \"label\": \"account2\",\n \"description\": \"this is a thing\"\n },\n {\n \"id\": 1,\n \"accountNumber\": 11111111111,\n \"label\": \"account1\",\n \"description\": \"this is a thing\"\n },\n ]\n \"total\": 2\n }\n\n :reqheader Authorization: OAuth token to authenticate\n :statuscode 200: no error\n \"\"\"\n self.reqparse.add_argument('type', type=str, location='args')\n args = self.reqparse.parse_args()\n\n if args['type']:\n return list(plugins.all(plugin_type=args['type']))\n\n return plugins.all()\n\n\nclass Plugins(AuthenticatedResource):\n \"\"\" Defines the the 'plugins' endpoint \"\"\"\n def __init__(self):\n super(Plugins, self).__init__()\n\n @marshal_items(FIELDS)\n def get(self, name):\n \"\"\"\n .. http:get:: /plugins/<name>\n\n The current plugin list\n\n **Example request**:\n\n .. sourcecode:: http\n\n GET /plugins HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/javascript\n\n {\n \"accountNumber\": 222222222,\n \"label\": \"account2\",\n \"description\": \"this is a thing\"\n }\n\n :reqheader Authorization: OAuth token to authenticate\n :statuscode 200: no error\n \"\"\"\n return plugins.get(name)\n\n\napi.add_resource(PluginsList, '/plugins', endpoint='plugins')\napi.add_resource(Plugins, '/plugins/<name>', endpoint='pluginName')\n", "path": "lemur/plugins/views.py"}, {"content": "\"\"\"\n.. module: lemur.common.managers\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom flask import current_app\n\n\n# inspired by https://github.com/getsentry/sentry\nclass InstanceManager(object):\n def __init__(self, class_list=None, instances=True):\n if class_list is None:\n class_list = []\n self.instances = instances\n self.update(class_list)\n\n def get_class_list(self):\n return self.class_list\n\n def add(self, class_path):\n self.cache = None\n self.class_list.append(class_path)\n\n def remove(self, class_path):\n self.cache = None\n self.class_list.remove(class_path)\n\n def update(self, class_list):\n \"\"\"\n Updates the class list and wipes the cache.\n \"\"\"\n self.cache = None\n self.class_list = class_list\n\n def all(self):\n \"\"\"\n Returns a list of cached instances.\n \"\"\"\n class_list = list(self.get_class_list())\n if not class_list:\n self.cache = []\n return []\n\n if self.cache is not None:\n return self.cache\n\n results = []\n for cls_path in class_list:\n module_name, class_name = cls_path.rsplit('.', 1)\n try:\n module = __import__(module_name, {}, {}, class_name)\n cls = getattr(module, class_name)\n if self.instances:\n results.append(cls())\n else:\n results.append(cls)\n except Exception:\n current_app.logger.exception('Unable to import %s', cls_path)\n continue\n self.cache = results\n\n return results\n", "path": "lemur/common/managers.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.plugins.views\n :platform: Unix\n :synopsis: This module contains all of the accounts view code.\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom flask import Blueprint\nfrom flask.ext.restful import Api, reqparse, fields\nfrom lemur.auth.service import AuthenticatedResource\n\nfrom lemur.common.utils import marshal_items\n\nfrom lemur.plugins.base import plugins\n\nmod = Blueprint('plugins', __name__)\napi = Api(mod)\n\n\nFIELDS = {\n 'title': fields.String,\n 'pluginOptions': fields.Raw(attribute='options'),\n 'description': fields.String,\n 'version': fields.String,\n 'author': fields.String,\n 'authorUrl': fields.String,\n 'type': fields.String,\n 'slug': fields.String,\n}\n\n\nclass PluginsList(AuthenticatedResource):\n \"\"\" Defines the 'plugins' endpoint \"\"\"\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n super(PluginsList, self).__init__()\n\n @marshal_items(FIELDS)\n def get(self):\n \"\"\"\n .. http:get:: /plugins\n\n The current plugin list\n\n **Example request**:\n\n .. sourcecode:: http\n\n GET /plugins HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/javascript\n\n {\n \"items\": [\n {\n \"id\": 2,\n \"accountNumber\": 222222222,\n \"label\": \"account2\",\n \"description\": \"this is a thing\"\n },\n {\n \"id\": 1,\n \"accountNumber\": 11111111111,\n \"label\": \"account1\",\n \"description\": \"this is a thing\"\n },\n ]\n \"total\": 2\n }\n\n :reqheader Authorization: OAuth token to authenticate\n :statuscode 200: no error\n \"\"\"\n self.reqparse.add_argument('type', type=str, location='args')\n args = self.reqparse.parse_args()\n\n if args['type']:\n return list(plugins.all(plugin_type=args['type']))\n\n return list(plugins.all())\n\n\nclass Plugins(AuthenticatedResource):\n \"\"\" Defines the the 'plugins' endpoint \"\"\"\n def __init__(self):\n super(Plugins, self).__init__()\n\n @marshal_items(FIELDS)\n def get(self, name):\n \"\"\"\n .. http:get:: /plugins/<name>\n\n The current plugin list\n\n **Example request**:\n\n .. sourcecode:: http\n\n GET /plugins HTTP/1.1\n Host: example.com\n Accept: application/json, text/javascript\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/javascript\n\n {\n \"accountNumber\": 222222222,\n \"label\": \"account2\",\n \"description\": \"this is a thing\"\n }\n\n :reqheader Authorization: OAuth token to authenticate\n :statuscode 200: no error\n \"\"\"\n return plugins.get(name)\n\n\napi.add_resource(PluginsList, '/plugins', endpoint='plugins')\napi.add_resource(Plugins, '/plugins/<name>', endpoint='pluginName')\n", "path": "lemur/plugins/views.py"}, {"content": "\"\"\"\n.. module: lemur.common.managers\n :platform: Unix\n :copyright: (c) 2015 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n\n.. moduleauthor:: Kevin Glisson <[email protected]>\n\"\"\"\nfrom flask import current_app\n\n\n# inspired by https://github.com/getsentry/sentry\nclass InstanceManager(object):\n def __init__(self, class_list=None, instances=True):\n if class_list is None:\n class_list = []\n self.instances = instances\n self.update(class_list)\n\n def get_class_list(self):\n return self.class_list\n\n def add(self, class_path):\n self.cache = None\n if class_path not in self.class_list:\n self.class_list.append(class_path)\n\n def remove(self, class_path):\n self.cache = None\n self.class_list.remove(class_path)\n\n def update(self, class_list):\n \"\"\"\n Updates the class list and wipes the cache.\n \"\"\"\n self.cache = None\n self.class_list = class_list\n\n def all(self):\n \"\"\"\n Returns a list of cached instances.\n \"\"\"\n class_list = list(self.get_class_list())\n if not class_list:\n self.cache = []\n return []\n\n if self.cache is not None:\n return self.cache\n\n results = []\n for cls_path in class_list:\n module_name, class_name = cls_path.rsplit('.', 1)\n try:\n module = __import__(module_name, {}, {}, class_name)\n cls = getattr(module, class_name)\n if self.instances:\n results.append(cls())\n else:\n results.append(cls)\n except Exception:\n current_app.logger.exception('Unable to import %s', cls_path)\n continue\n self.cache = results\n\n return results\n", "path": "lemur/common/managers.py"}]} | 1,901 | 199 |
gh_patches_debug_12142 | rasdani/github-patches | git_diff | safe-global__safe-config-service-90 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use different namespace and endpoint name for `/safe-apps`
The endpoint `/api/v1/safe-apps` is currently under the `v1` namespace and `safe-apps` endpoint name.
To align it better with the future endpoints the following should be changed:
- the namespace changes from `v1` to `safe-apps`
- the endpoint name changes from `safe-apps` to `list`
This results in a reverse url resolution with `safe-apps:list` instead of `v1:safe-apps`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/config/urls.py`
Content:
```
1 from django.contrib import admin
2 from django.http import HttpResponse
3 from django.urls import include, path, re_path
4 from drf_yasg.views import get_schema_view
5 from rest_framework import permissions
6
7 schema_view = get_schema_view(
8 validators=["flex", "ssv"],
9 public=True,
10 permission_classes=(permissions.AllowAny,),
11 )
12
13 urlpatterns = [
14 path("api/v1/", include("safe_apps.urls", namespace="v1")),
15 path("api/v1/", include("chains.urls", namespace="chains")),
16 path("admin/", admin.site.urls),
17 path("check/", lambda request: HttpResponse("Ok"), name="check"),
18 re_path(
19 r"^swagger(?P<format>\.json|\.yaml)$",
20 schema_view.without_ui(cache_timeout=0),
21 name="schema-json",
22 ),
23 re_path(
24 r"^$",
25 schema_view.with_ui("swagger", cache_timeout=0),
26 name="schema-swagger-ui",
27 ),
28 ]
29
```
Path: `src/safe_apps/urls.py`
Content:
```
1 from django.urls import path
2
3 from .views import SafeAppsListView
4
5 app_name = "apps"
6
7 urlpatterns = [
8 path("safe-apps/", SafeAppsListView.as_view(), name="safe-apps"),
9 ]
10
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/config/urls.py b/src/config/urls.py
--- a/src/config/urls.py
+++ b/src/config/urls.py
@@ -11,7 +11,7 @@
)
urlpatterns = [
- path("api/v1/", include("safe_apps.urls", namespace="v1")),
+ path("api/v1/", include("safe_apps.urls", namespace="safe-apps")),
path("api/v1/", include("chains.urls", namespace="chains")),
path("admin/", admin.site.urls),
path("check/", lambda request: HttpResponse("Ok"), name="check"),
diff --git a/src/safe_apps/urls.py b/src/safe_apps/urls.py
--- a/src/safe_apps/urls.py
+++ b/src/safe_apps/urls.py
@@ -5,5 +5,5 @@
app_name = "apps"
urlpatterns = [
- path("safe-apps/", SafeAppsListView.as_view(), name="safe-apps"),
+ path("safe-apps/", SafeAppsListView.as_view(), name="list"),
]
| {"golden_diff": "diff --git a/src/config/urls.py b/src/config/urls.py\n--- a/src/config/urls.py\n+++ b/src/config/urls.py\n@@ -11,7 +11,7 @@\n )\n \n urlpatterns = [\n- path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n+ path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"safe-apps\")),\n path(\"api/v1/\", include(\"chains.urls\", namespace=\"chains\")),\n path(\"admin/\", admin.site.urls),\n path(\"check/\", lambda request: HttpResponse(\"Ok\"), name=\"check\"),\ndiff --git a/src/safe_apps/urls.py b/src/safe_apps/urls.py\n--- a/src/safe_apps/urls.py\n+++ b/src/safe_apps/urls.py\n@@ -5,5 +5,5 @@\n app_name = \"apps\"\n \n urlpatterns = [\n- path(\"safe-apps/\", SafeAppsListView.as_view(), name=\"safe-apps\"),\n+ path(\"safe-apps/\", SafeAppsListView.as_view(), name=\"list\"),\n ]\n", "issue": "Use different namespace and endpoint name for `/safe-apps`\nThe endpoint `/api/v1/safe-apps` is currently under the `v1` namespace and `safe-apps` endpoint name.\r\n\r\nTo align it better with the future endpoints the following should be changed:\r\n\r\n- the namespace changes from `v1` to `safe-apps`\r\n- the endpoint name changes from `safe-apps` to `list`\r\n\r\nThis results in a reverse url resolution with `safe-apps:list` instead of `v1:safe-apps`\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.urls import include, path, re_path\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\n\nschema_view = get_schema_view(\n validators=[\"flex\", \"ssv\"],\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"v1\")),\n path(\"api/v1/\", include(\"chains.urls\", namespace=\"chains\")),\n path(\"admin/\", admin.site.urls),\n path(\"check/\", lambda request: HttpResponse(\"Ok\"), name=\"check\"),\n re_path(\n r\"^swagger(?P<format>\\.json|\\.yaml)$\",\n schema_view.without_ui(cache_timeout=0),\n name=\"schema-json\",\n ),\n re_path(\n r\"^$\",\n schema_view.with_ui(\"swagger\", cache_timeout=0),\n name=\"schema-swagger-ui\",\n ),\n]\n", "path": "src/config/urls.py"}, {"content": "from django.urls import path\n\nfrom .views import SafeAppsListView\n\napp_name = \"apps\"\n\nurlpatterns = [\n path(\"safe-apps/\", SafeAppsListView.as_view(), name=\"safe-apps\"),\n]\n", "path": "src/safe_apps/urls.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom django.http import HttpResponse\nfrom django.urls import include, path, re_path\nfrom drf_yasg.views import get_schema_view\nfrom rest_framework import permissions\n\nschema_view = get_schema_view(\n validators=[\"flex\", \"ssv\"],\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path(\"api/v1/\", include(\"safe_apps.urls\", namespace=\"safe-apps\")),\n path(\"api/v1/\", include(\"chains.urls\", namespace=\"chains\")),\n path(\"admin/\", admin.site.urls),\n path(\"check/\", lambda request: HttpResponse(\"Ok\"), name=\"check\"),\n re_path(\n r\"^swagger(?P<format>\\.json|\\.yaml)$\",\n schema_view.without_ui(cache_timeout=0),\n name=\"schema-json\",\n ),\n re_path(\n r\"^$\",\n schema_view.with_ui(\"swagger\", cache_timeout=0),\n name=\"schema-swagger-ui\",\n ),\n]\n", "path": "src/config/urls.py"}, {"content": "from django.urls import path\n\nfrom .views import SafeAppsListView\n\napp_name = \"apps\"\n\nurlpatterns = [\n path(\"safe-apps/\", SafeAppsListView.as_view(), name=\"list\"),\n]\n", "path": 
"src/safe_apps/urls.py"}]} | 699 | 228 |
gh_patches_debug_41061 | rasdani/github-patches | git_diff | streamlink__streamlink-3019 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] BTV plugin needs updating
## Bug Report
- [x] This is a bug report and I have read the contribution guidelines.
### Description
The location of the BTV livestream has moved to https://btvplus.bg/live/
**Edit**: Livestreaming no longer requires a user to login, so that can be removed from the plugin info page.
### Expected / Actual behavior
Streamlink should be able to handle the link.
### Reproduction steps / Explicit stream URLs to test
1. streamlink https://btvplus.bg/live/ best
2. error: No plugin can handle URL: https://btvplus.bg/live/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/btv.py`
Content:
```
1 from __future__ import print_function
2 import re
3
4 from streamlink import PluginError
5 from streamlink.plugin import Plugin
6 from streamlink.plugin.api import validate
7 from streamlink.stream import HLSStream
8 from streamlink.utils import parse_json
9 from streamlink.plugin import PluginArgument, PluginArguments
10
11
12 class BTV(Plugin):
13 arguments = PluginArguments(
14 PluginArgument(
15 "username",
16 metavar="USERNAME",
17 requires=["password"],
18 help="""
19 A BTV username required to access any stream.
20 """
21 ),
22 PluginArgument(
23 "password",
24 sensitive=True,
25 metavar="PASSWORD",
26 help="""
27 A BTV account password to use with --btv-username.
28 """
29 )
30 )
31 url_re = re.compile(r"https?://(?:www\.)?btv\.bg/live/?")
32
33 api_url = "http://www.btv.bg/lbin/global/player_config.php"
34 check_login_url = "http://www.btv.bg/lbin/userRegistration/check_user_login.php"
35 login_url = "https://www.btv.bg/bin/registration2/login.php?action=login&settings=0"
36
37 media_id_re = re.compile(r"media_id=(\d+)")
38 src_re = re.compile(r"src: \"(http.*?)\"")
39 api_schema = validate.Schema(
40 validate.all(
41 {"status": "ok", "config": validate.text},
42 validate.get("config"),
43 validate.all(
44 validate.transform(src_re.search),
45 validate.any(
46 None,
47 validate.get(1),
48 validate.url()
49 )
50 )
51 )
52 )
53
54 @classmethod
55 def can_handle_url(cls, url):
56 return cls.url_re.match(url) is not None
57
58 def login(self, username, password):
59 res = self.session.http.post(self.login_url, data={"username": username, "password": password})
60 if "success_logged_in" in res.text:
61 return True
62 else:
63 return False
64
65 def get_hls_url(self, media_id):
66 res = self.session.http.get(self.api_url, params=dict(media_id=media_id))
67 try:
68 return parse_json(res.text, schema=self.api_schema)
69 except PluginError:
70 return
71
72 def _get_streams(self):
73 if not self.options.get("username") or not self.options.get("password"):
74 self.logger.error("BTV requires registration, set the username and password"
75 " with --btv-username and --btv-password")
76 elif self.login(self.options.get("username"), self.options.get("password")):
77 res = self.session.http.get(self.url)
78 media_match = self.media_id_re.search(res.text)
79 media_id = media_match and media_match.group(1)
80 if media_id:
81 self.logger.debug("Found media id: {0}", media_id)
82 stream_url = self.get_hls_url(media_id)
83 if stream_url:
84 return HLSStream.parse_variant_playlist(self.session, stream_url)
85 else:
86 self.logger.error("Login failed, a valid username and password is required")
87
88
89 __plugin__ = BTV
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py
--- a/src/streamlink/plugins/btv.py
+++ b/src/streamlink/plugins/btv.py
@@ -1,38 +1,30 @@
-from __future__ import print_function
+import argparse
+import logging
import re
-from streamlink import PluginError
-from streamlink.plugin import Plugin
+from streamlink.plugin import Plugin, PluginArguments, PluginArgument
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
from streamlink.utils import parse_json
-from streamlink.plugin import PluginArgument, PluginArguments
+
+log = logging.getLogger(__name__)
class BTV(Plugin):
arguments = PluginArguments(
PluginArgument(
"username",
- metavar="USERNAME",
- requires=["password"],
- help="""
- A BTV username required to access any stream.
- """
+ help=argparse.SUPPRESS
),
PluginArgument(
"password",
sensitive=True,
- metavar="PASSWORD",
- help="""
- A BTV account password to use with --btv-username.
- """
+ help=argparse.SUPPRESS
)
)
- url_re = re.compile(r"https?://(?:www\.)?btv\.bg/live/?")
- api_url = "http://www.btv.bg/lbin/global/player_config.php"
- check_login_url = "http://www.btv.bg/lbin/userRegistration/check_user_login.php"
- login_url = "https://www.btv.bg/bin/registration2/login.php?action=login&settings=0"
+ url_re = re.compile(r"https?://(?:www\.)?btvplus\.bg/live/?")
+ api_url = "https://btvplus.bg/lbin/v3/btvplus/player_config.php"
media_id_re = re.compile(r"media_id=(\d+)")
src_re = re.compile(r"src: \"(http.*?)\"")
@@ -55,35 +47,19 @@
def can_handle_url(cls, url):
return cls.url_re.match(url) is not None
- def login(self, username, password):
- res = self.session.http.post(self.login_url, data={"username": username, "password": password})
- if "success_logged_in" in res.text:
- return True
- else:
- return False
-
def get_hls_url(self, media_id):
res = self.session.http.get(self.api_url, params=dict(media_id=media_id))
- try:
- return parse_json(res.text, schema=self.api_schema)
- except PluginError:
- return
+ return parse_json(res.text, schema=self.api_schema)
def _get_streams(self):
- if not self.options.get("username") or not self.options.get("password"):
- self.logger.error("BTV requires registration, set the username and password"
- " with --btv-username and --btv-password")
- elif self.login(self.options.get("username"), self.options.get("password")):
- res = self.session.http.get(self.url)
- media_match = self.media_id_re.search(res.text)
- media_id = media_match and media_match.group(1)
- if media_id:
- self.logger.debug("Found media id: {0}", media_id)
- stream_url = self.get_hls_url(media_id)
- if stream_url:
- return HLSStream.parse_variant_playlist(self.session, stream_url)
- else:
- self.logger.error("Login failed, a valid username and password is required")
+ res = self.session.http.get(self.url)
+ media_match = self.media_id_re.search(res.text)
+ media_id = media_match and media_match.group(1)
+ if media_id:
+ log.debug("Found media id: {0}", media_id)
+ stream_url = self.get_hls_url(media_id)
+ if stream_url:
+ return HLSStream.parse_variant_playlist(self.session, stream_url)
__plugin__ = BTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/btv.py b/src/streamlink/plugins/btv.py\n--- a/src/streamlink/plugins/btv.py\n+++ b/src/streamlink/plugins/btv.py\n@@ -1,38 +1,30 @@\n-from __future__ import print_function\n+import argparse\n+import logging\n import re\n \n-from streamlink import PluginError\n-from streamlink.plugin import Plugin\n+from streamlink.plugin import Plugin, PluginArguments, PluginArgument\n from streamlink.plugin.api import validate\n from streamlink.stream import HLSStream\n from streamlink.utils import parse_json\n-from streamlink.plugin import PluginArgument, PluginArguments\n+\n+log = logging.getLogger(__name__)\n \n \n class BTV(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"username\",\n- metavar=\"USERNAME\",\n- requires=[\"password\"],\n- help=\"\"\"\n- A BTV username required to access any stream.\n- \"\"\"\n+ help=argparse.SUPPRESS\n ),\n PluginArgument(\n \"password\",\n sensitive=True,\n- metavar=\"PASSWORD\",\n- help=\"\"\"\n- A BTV account password to use with --btv-username.\n- \"\"\"\n+ help=argparse.SUPPRESS\n )\n )\n- url_re = re.compile(r\"https?://(?:www\\.)?btv\\.bg/live/?\")\n \n- api_url = \"http://www.btv.bg/lbin/global/player_config.php\"\n- check_login_url = \"http://www.btv.bg/lbin/userRegistration/check_user_login.php\"\n- login_url = \"https://www.btv.bg/bin/registration2/login.php?action=login&settings=0\"\n+ url_re = re.compile(r\"https?://(?:www\\.)?btvplus\\.bg/live/?\")\n+ api_url = \"https://btvplus.bg/lbin/v3/btvplus/player_config.php\"\n \n media_id_re = re.compile(r\"media_id=(\\d+)\")\n src_re = re.compile(r\"src: \\\"(http.*?)\\\"\")\n@@ -55,35 +47,19 @@\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n \n- def login(self, username, password):\n- res = self.session.http.post(self.login_url, data={\"username\": username, \"password\": password})\n- if \"success_logged_in\" in res.text:\n- return True\n- else:\n- return False\n-\n def get_hls_url(self, media_id):\n res = self.session.http.get(self.api_url, params=dict(media_id=media_id))\n- try:\n- return parse_json(res.text, schema=self.api_schema)\n- except PluginError:\n- return\n+ return parse_json(res.text, schema=self.api_schema)\n \n def _get_streams(self):\n- if not self.options.get(\"username\") or not self.options.get(\"password\"):\n- self.logger.error(\"BTV requires registration, set the username and password\"\n- \" with --btv-username and --btv-password\")\n- elif self.login(self.options.get(\"username\"), self.options.get(\"password\")):\n- res = self.session.http.get(self.url)\n- media_match = self.media_id_re.search(res.text)\n- media_id = media_match and media_match.group(1)\n- if media_id:\n- self.logger.debug(\"Found media id: {0}\", media_id)\n- stream_url = self.get_hls_url(media_id)\n- if stream_url:\n- return HLSStream.parse_variant_playlist(self.session, stream_url)\n- else:\n- self.logger.error(\"Login failed, a valid username and password is required\")\n+ res = self.session.http.get(self.url)\n+ media_match = self.media_id_re.search(res.text)\n+ media_id = media_match and media_match.group(1)\n+ if media_id:\n+ log.debug(\"Found media id: {0}\", media_id)\n+ stream_url = self.get_hls_url(media_id)\n+ if stream_url:\n+ return HLSStream.parse_variant_playlist(self.session, stream_url)\n \n \n __plugin__ = BTV\n", "issue": "[bug] BTV plugin needs updating\n## Bug Report\r\n- [x] This is a bug report and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\nThe location of the BTV 
livestream has moved to https://btvplus.bg/live/\r\n**Edit**: Livestreaming no longer requires a user to login, so that can be removed from the plugin info page.\r\n\r\n\r\n### Expected / Actual behavior\r\nStreamlink should be able to handle the link.\r\n\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n1. streamlink https://btvplus.bg/live/ best \r\n2. error: No plugin can handle URL: https://btvplus.bg/live/\n", "before_files": [{"content": "from __future__ import print_function\nimport re\n\nfrom streamlink import PluginError\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import parse_json\nfrom streamlink.plugin import PluginArgument, PluginArguments\n\n\nclass BTV(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"username\",\n metavar=\"USERNAME\",\n requires=[\"password\"],\n help=\"\"\"\n A BTV username required to access any stream.\n \"\"\"\n ),\n PluginArgument(\n \"password\",\n sensitive=True,\n metavar=\"PASSWORD\",\n help=\"\"\"\n A BTV account password to use with --btv-username.\n \"\"\"\n )\n )\n url_re = re.compile(r\"https?://(?:www\\.)?btv\\.bg/live/?\")\n\n api_url = \"http://www.btv.bg/lbin/global/player_config.php\"\n check_login_url = \"http://www.btv.bg/lbin/userRegistration/check_user_login.php\"\n login_url = \"https://www.btv.bg/bin/registration2/login.php?action=login&settings=0\"\n\n media_id_re = re.compile(r\"media_id=(\\d+)\")\n src_re = re.compile(r\"src: \\\"(http.*?)\\\"\")\n api_schema = validate.Schema(\n validate.all(\n {\"status\": \"ok\", \"config\": validate.text},\n validate.get(\"config\"),\n validate.all(\n validate.transform(src_re.search),\n validate.any(\n None,\n validate.get(1),\n validate.url()\n )\n )\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def login(self, username, password):\n res = self.session.http.post(self.login_url, data={\"username\": username, \"password\": password})\n if \"success_logged_in\" in res.text:\n return True\n else:\n return False\n\n def get_hls_url(self, media_id):\n res = self.session.http.get(self.api_url, params=dict(media_id=media_id))\n try:\n return parse_json(res.text, schema=self.api_schema)\n except PluginError:\n return\n\n def _get_streams(self):\n if not self.options.get(\"username\") or not self.options.get(\"password\"):\n self.logger.error(\"BTV requires registration, set the username and password\"\n \" with --btv-username and --btv-password\")\n elif self.login(self.options.get(\"username\"), self.options.get(\"password\")):\n res = self.session.http.get(self.url)\n media_match = self.media_id_re.search(res.text)\n media_id = media_match and media_match.group(1)\n if media_id:\n self.logger.debug(\"Found media id: {0}\", media_id)\n stream_url = self.get_hls_url(media_id)\n if stream_url:\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n else:\n self.logger.error(\"Login failed, a valid username and password is required\")\n\n\n__plugin__ = BTV\n", "path": "src/streamlink/plugins/btv.py"}], "after_files": [{"content": "import argparse\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, PluginArguments, PluginArgument\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import parse_json\n\nlog = logging.getLogger(__name__)\n\n\nclass BTV(Plugin):\n arguments = PluginArguments(\n PluginArgument(\n \"username\",\n 
help=argparse.SUPPRESS\n ),\n PluginArgument(\n \"password\",\n sensitive=True,\n help=argparse.SUPPRESS\n )\n )\n\n url_re = re.compile(r\"https?://(?:www\\.)?btvplus\\.bg/live/?\")\n api_url = \"https://btvplus.bg/lbin/v3/btvplus/player_config.php\"\n\n media_id_re = re.compile(r\"media_id=(\\d+)\")\n src_re = re.compile(r\"src: \\\"(http.*?)\\\"\")\n api_schema = validate.Schema(\n validate.all(\n {\"status\": \"ok\", \"config\": validate.text},\n validate.get(\"config\"),\n validate.all(\n validate.transform(src_re.search),\n validate.any(\n None,\n validate.get(1),\n validate.url()\n )\n )\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def get_hls_url(self, media_id):\n res = self.session.http.get(self.api_url, params=dict(media_id=media_id))\n return parse_json(res.text, schema=self.api_schema)\n\n def _get_streams(self):\n res = self.session.http.get(self.url)\n media_match = self.media_id_re.search(res.text)\n media_id = media_match and media_match.group(1)\n if media_id:\n log.debug(\"Found media id: {0}\", media_id)\n stream_url = self.get_hls_url(media_id)\n if stream_url:\n return HLSStream.parse_variant_playlist(self.session, stream_url)\n\n\n__plugin__ = BTV\n", "path": "src/streamlink/plugins/btv.py"}]} | 1,231 | 890 |
gh_patches_debug_27700 | rasdani/github-patches | git_diff | streamlink__streamlink-5742 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.atresplayer: Error -3 while decompressing data: incorrect header check
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
streamlink 6.4.2
### Description
Possible change in link decoding.
### Debug log
```text
[cli][debug] OS: Windows 10
[cli][debug] Python: 3.11.6
[cli][debug] OpenSSL: OpenSSL 3.0.11 19 Sep 2023
[cli][debug] Streamlink: 6.4.2
[cli][debug] Dependencies:
[cli][debug] certifi: 2023.11.17
[cli][debug] isodate: 0.6.1
[cli][debug] lxml: 4.9.3
[cli][debug] pycountry: 22.3.5
[cli][debug] pycryptodome: 3.19.0
[cli][debug] PySocks: 1.7.1
[cli][debug] requests: 2.31.0
[cli][debug] trio: 0.23.1
[cli][debug] trio-websocket: 0.11.1
[cli][debug] typing-extensions: 4.8.0
[cli][debug] urllib3: 2.1.0
[cli][debug] websocket-client: 1.6.4
[cli][debug] Arguments:
[cli][debug] url=https://www.atresplayer.com/directos/antena3/
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][debug] --ffmpeg-ffmpeg=C:\Program Files\Streamlink\ffmpeg\ffmpeg.exe
[cli][info] Found matching plugin atresplayer for URL https://www.atresplayer.com/directos/antena3/
[plugins.atresplayer][debug] Player API URL: https://api.atresplayer.com/player/v1/live/5a6a165a7ed1a834493ebf6a
[plugins.atresplayer][debug] Stream source: https://directo.atresmedia.com/49aa0979c14a4113668984aa8f6f7a43dd3a624a_1701338572/antena3/master.m3u8 (application/vnd.apple.mpegurl)
[utils.l10n][debug] Language code: es_ES
error: Unable to open URL: https://directo.atresmedia.com/49aa0979c14a4113668984aa8f6f7a43dd3a624a_1701338572/antena3/master.m3u8 (('Received response with content-encoding: gzip, but failed to decode it.', error('Error -3 while decompressing data: incorrect header check')))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/atresplayer.py`
Content:
```
1 """
2 $description Spanish live TV channels from Atresmedia Television, including Antena 3 and laSexta.
3 $url atresplayer.com
4 $type live
5 $region Spain
6 """
7
8 import logging
9 import re
10
11 from streamlink.plugin import Plugin, pluginmatcher
12 from streamlink.plugin.api import validate
13 from streamlink.stream.dash import DASHStream
14 from streamlink.stream.hls import HLSStream
15 from streamlink.utils.url import update_scheme
16
17
18 log = logging.getLogger(__name__)
19
20
21 @pluginmatcher(re.compile(
22 r"https?://(?:www\.)?atresplayer\.com/directos/.+",
23 ))
24 class AtresPlayer(Plugin):
25 _channels_api_url = "https://api.atresplayer.com/client/v1/info/channels"
26 _player_api_url = "https://api.atresplayer.com/player/v1/live/{channel_id}"
27
28 def __init__(self, *args, **kwargs):
29 super().__init__(*args, **kwargs)
30 self.url = update_scheme("https://", f"{self.url.rstrip('/')}/")
31
32 def _get_streams(self):
33 channel_path = f"/{self.url.split('/')[-2]}/"
34 channel_data = self.session.http.get(self._channels_api_url, schema=validate.Schema(
35 validate.parse_json(),
36 [{
37 "id": str,
38 "link": {"url": str},
39 }],
40 validate.filter(lambda item: item["link"]["url"] == channel_path),
41 ))
42 if not channel_data:
43 return
44 channel_id = channel_data[0]["id"]
45
46 player_api_url = self._player_api_url.format(channel_id=channel_id)
47 log.debug(f"Player API URL: {player_api_url}")
48
49 sources = self.session.http.get(player_api_url, acceptable_status=(200, 403), schema=validate.Schema(
50 validate.parse_json(),
51 validate.any(
52 {
53 "error": str,
54 "error_description": str,
55 },
56 {
57 "sources": [
58 validate.all(
59 {
60 "src": validate.url(),
61 validate.optional("type"): str,
62 },
63 validate.union_get("type", "src"),
64 ),
65 ],
66 },
67 ),
68 ))
69 if "error" in sources:
70 log.error(f"Player API error: {sources['error']} - {sources['error_description']}")
71 return
72
73 for streamtype, streamsrc in sources.get("sources"):
74 log.debug(f"Stream source: {streamsrc} ({streamtype or 'n/a'})")
75
76 if streamtype == "application/vnd.apple.mpegurl":
77 streams = HLSStream.parse_variant_playlist(self.session, streamsrc)
78 if not streams:
79 yield "live", HLSStream(self.session, streamsrc)
80 else:
81 yield from streams.items()
82 elif streamtype == "application/dash+xml":
83 yield from DASHStream.parse_manifest(self.session, streamsrc).items()
84
85
86 __plugin__ = AtresPlayer
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/atresplayer.py b/src/streamlink/plugins/atresplayer.py
--- a/src/streamlink/plugins/atresplayer.py
+++ b/src/streamlink/plugins/atresplayer.py
@@ -23,7 +23,7 @@
))
class AtresPlayer(Plugin):
_channels_api_url = "https://api.atresplayer.com/client/v1/info/channels"
- _player_api_url = "https://api.atresplayer.com/player/v1/live/{channel_id}"
+ _player_api_url = "https://api.atresplayer.com/player/v1/live/{channel_id}?NODRM=true"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@@ -54,7 +54,7 @@
"error_description": str,
},
{
- "sources": [
+ "sourcesLive": [
validate.all(
{
"src": validate.url(),
@@ -70,7 +70,7 @@
log.error(f"Player API error: {sources['error']} - {sources['error_description']}")
return
- for streamtype, streamsrc in sources.get("sources"):
+ for streamtype, streamsrc in sources.get("sourcesLive"):
log.debug(f"Stream source: {streamsrc} ({streamtype or 'n/a'})")
if streamtype == "application/vnd.apple.mpegurl":
| {"golden_diff": "diff --git a/src/streamlink/plugins/atresplayer.py b/src/streamlink/plugins/atresplayer.py\n--- a/src/streamlink/plugins/atresplayer.py\n+++ b/src/streamlink/plugins/atresplayer.py\n@@ -23,7 +23,7 @@\n ))\n class AtresPlayer(Plugin):\n _channels_api_url = \"https://api.atresplayer.com/client/v1/info/channels\"\n- _player_api_url = \"https://api.atresplayer.com/player/v1/live/{channel_id}\"\n+ _player_api_url = \"https://api.atresplayer.com/player/v1/live/{channel_id}?NODRM=true\"\n \n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n@@ -54,7 +54,7 @@\n \"error_description\": str,\n },\n {\n- \"sources\": [\n+ \"sourcesLive\": [\n validate.all(\n {\n \"src\": validate.url(),\n@@ -70,7 +70,7 @@\n log.error(f\"Player API error: {sources['error']} - {sources['error_description']}\")\n return\n \n- for streamtype, streamsrc in sources.get(\"sources\"):\n+ for streamtype, streamsrc in sources.get(\"sourcesLive\"):\n log.debug(f\"Stream source: {streamsrc} ({streamtype or 'n/a'})\")\n \n if streamtype == \"application/vnd.apple.mpegurl\":\n", "issue": "plugins.atresplayer: Error -3 while decompressing data: incorrect header check\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nstreamlink 6.4.2\n\n### Description\n\nPossible change in link decoding.\n\n### Debug log\n\n```text\n[cli][debug] OS: Windows 10\r\n[cli][debug] Python: 3.11.6\r\n[cli][debug] OpenSSL: OpenSSL 3.0.11 19 Sep 2023\r\n[cli][debug] Streamlink: 6.4.2\r\n[cli][debug] Dependencies:\r\n[cli][debug] certifi: 2023.11.17\r\n[cli][debug] isodate: 0.6.1\r\n[cli][debug] lxml: 4.9.3\r\n[cli][debug] pycountry: 22.3.5\r\n[cli][debug] pycryptodome: 3.19.0\r\n[cli][debug] PySocks: 1.7.1\r\n[cli][debug] requests: 2.31.0\r\n[cli][debug] trio: 0.23.1\r\n[cli][debug] trio-websocket: 0.11.1\r\n[cli][debug] typing-extensions: 4.8.0\r\n[cli][debug] urllib3: 2.1.0\r\n[cli][debug] websocket-client: 1.6.4\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.atresplayer.com/directos/antena3/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][debug] --ffmpeg-ffmpeg=C:\\Program Files\\Streamlink\\ffmpeg\\ffmpeg.exe\r\n[cli][info] Found matching plugin atresplayer for URL https://www.atresplayer.com/directos/antena3/\r\n[plugins.atresplayer][debug] Player API URL: https://api.atresplayer.com/player/v1/live/5a6a165a7ed1a834493ebf6a\r\n[plugins.atresplayer][debug] Stream source: https://directo.atresmedia.com/49aa0979c14a4113668984aa8f6f7a43dd3a624a_1701338572/antena3/master.m3u8 (application/vnd.apple.mpegurl)\r\n[utils.l10n][debug] Language code: es_ES\r\nerror: Unable to open URL: https://directo.atresmedia.com/49aa0979c14a4113668984aa8f6f7a43dd3a624a_1701338572/antena3/master.m3u8 (('Received response with content-encoding: gzip, but failed to decode it.', error('Error -3 while decompressing data: incorrect header check')))\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Spanish live 
TV channels from Atresmedia Television, including Antena 3 and laSexta.\n$url atresplayer.com\n$type live\n$region Spain\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.url import update_scheme\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?atresplayer\\.com/directos/.+\",\n))\nclass AtresPlayer(Plugin):\n _channels_api_url = \"https://api.atresplayer.com/client/v1/info/channels\"\n _player_api_url = \"https://api.atresplayer.com/player/v1/live/{channel_id}\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.url = update_scheme(\"https://\", f\"{self.url.rstrip('/')}/\")\n\n def _get_streams(self):\n channel_path = f\"/{self.url.split('/')[-2]}/\"\n channel_data = self.session.http.get(self._channels_api_url, schema=validate.Schema(\n validate.parse_json(),\n [{\n \"id\": str,\n \"link\": {\"url\": str},\n }],\n validate.filter(lambda item: item[\"link\"][\"url\"] == channel_path),\n ))\n if not channel_data:\n return\n channel_id = channel_data[0][\"id\"]\n\n player_api_url = self._player_api_url.format(channel_id=channel_id)\n log.debug(f\"Player API URL: {player_api_url}\")\n\n sources = self.session.http.get(player_api_url, acceptable_status=(200, 403), schema=validate.Schema(\n validate.parse_json(),\n validate.any(\n {\n \"error\": str,\n \"error_description\": str,\n },\n {\n \"sources\": [\n validate.all(\n {\n \"src\": validate.url(),\n validate.optional(\"type\"): str,\n },\n validate.union_get(\"type\", \"src\"),\n ),\n ],\n },\n ),\n ))\n if \"error\" in sources:\n log.error(f\"Player API error: {sources['error']} - {sources['error_description']}\")\n return\n\n for streamtype, streamsrc in sources.get(\"sources\"):\n log.debug(f\"Stream source: {streamsrc} ({streamtype or 'n/a'})\")\n\n if streamtype == \"application/vnd.apple.mpegurl\":\n streams = HLSStream.parse_variant_playlist(self.session, streamsrc)\n if not streams:\n yield \"live\", HLSStream(self.session, streamsrc)\n else:\n yield from streams.items()\n elif streamtype == \"application/dash+xml\":\n yield from DASHStream.parse_manifest(self.session, streamsrc).items()\n\n\n__plugin__ = AtresPlayer\n", "path": "src/streamlink/plugins/atresplayer.py"}], "after_files": [{"content": "\"\"\"\n$description Spanish live TV channels from Atresmedia Television, including Antena 3 and laSexta.\n$url atresplayer.com\n$type live\n$region Spain\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\nfrom streamlink.utils.url import update_scheme\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?atresplayer\\.com/directos/.+\",\n))\nclass AtresPlayer(Plugin):\n _channels_api_url = \"https://api.atresplayer.com/client/v1/info/channels\"\n _player_api_url = \"https://api.atresplayer.com/player/v1/live/{channel_id}?NODRM=true\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.url = update_scheme(\"https://\", f\"{self.url.rstrip('/')}/\")\n\n def _get_streams(self):\n channel_path = f\"/{self.url.split('/')[-2]}/\"\n channel_data = self.session.http.get(self._channels_api_url, schema=validate.Schema(\n 
validate.parse_json(),\n [{\n \"id\": str,\n \"link\": {\"url\": str},\n }],\n validate.filter(lambda item: item[\"link\"][\"url\"] == channel_path),\n ))\n if not channel_data:\n return\n channel_id = channel_data[0][\"id\"]\n\n player_api_url = self._player_api_url.format(channel_id=channel_id)\n log.debug(f\"Player API URL: {player_api_url}\")\n\n sources = self.session.http.get(player_api_url, acceptable_status=(200, 403), schema=validate.Schema(\n validate.parse_json(),\n validate.any(\n {\n \"error\": str,\n \"error_description\": str,\n },\n {\n \"sourcesLive\": [\n validate.all(\n {\n \"src\": validate.url(),\n validate.optional(\"type\"): str,\n },\n validate.union_get(\"type\", \"src\"),\n ),\n ],\n },\n ),\n ))\n if \"error\" in sources:\n log.error(f\"Player API error: {sources['error']} - {sources['error_description']}\")\n return\n\n for streamtype, streamsrc in sources.get(\"sourcesLive\"):\n log.debug(f\"Stream source: {streamsrc} ({streamtype or 'n/a'})\")\n\n if streamtype == \"application/vnd.apple.mpegurl\":\n streams = HLSStream.parse_variant_playlist(self.session, streamsrc)\n if not streams:\n yield \"live\", HLSStream(self.session, streamsrc)\n else:\n yield from streams.items()\n elif streamtype == \"application/dash+xml\":\n yield from DASHStream.parse_manifest(self.session, streamsrc).items()\n\n\n__plugin__ = AtresPlayer\n", "path": "src/streamlink/plugins/atresplayer.py"}]} | 1,898 | 310 |
gh_patches_debug_17070 | rasdani/github-patches | git_diff | xonsh__xonsh-341 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
xonsh dies if the prompt raises an exception
If a function in the prompt raises an exception, it kills xonsh. I would expect the error to be displayed, but not kill the shell.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/base_shell.py`
Content:
```
1 """The base class for xonsh shell"""
2 import os
3 import sys
4 import builtins
5 import traceback
6
7 from xonsh.execer import Execer
8 from xonsh.tools import XonshError, escape_windows_title_string
9 from xonsh.tools import ON_WINDOWS
10 from xonsh.completer import Completer
11 from xonsh.environ import multiline_prompt, format_prompt
12
13
14 class BaseShell(object):
15 """The xonsh shell."""
16
17 def __init__(self, execer, ctx, **kwargs):
18 super().__init__(**kwargs)
19 self.execer = execer
20 self.ctx = ctx
21 self.completer = Completer()
22 self.buffer = []
23 self.need_more_lines = False
24 self.mlprompt = None
25
26 def emptyline(self):
27 """Called when an empty line has been entered."""
28 self.need_more_lines = False
29 self.default('')
30
31 def precmd(self, line):
32 """Called just before execution of line."""
33 return line if self.need_more_lines else line.lstrip()
34
35 def default(self, line):
36 """Implements code execution."""
37 line = line if line.endswith('\n') else line + '\n'
38 code = self.push(line)
39 if code is None:
40 return
41 try:
42 self.execer.exec(code, mode='single', glbs=self.ctx) # no locals
43 except XonshError as e:
44 print(e.args[0], file=sys.stderr)
45 except:
46 _print_exception()
47 if builtins.__xonsh_exit__:
48 return True
49
50 def push(self, line):
51 """Pushes a line onto the buffer and compiles the code in a way that
52 enables multiline input.
53 """
54 code = None
55 self.buffer.append(line)
56 if self.need_more_lines:
57 return code
58 src = ''.join(self.buffer)
59 try:
60 code = self.execer.compile(src,
61 mode='single',
62 glbs=None,
63 locs=self.ctx)
64 self.reset_buffer()
65 except SyntaxError:
66 if line == '\n':
67 self.reset_buffer()
68 _print_exception()
69 return None
70 self.need_more_lines = True
71 except:
72 self.reset_buffer()
73 _print_exception()
74 return None
75 return code
76
77 def reset_buffer(self):
78 """Resets the line buffer."""
79 self.buffer.clear()
80 self.need_more_lines = False
81 self.mlprompt = None
82
83 def settitle(self):
84 """Sets terminal title."""
85 env = builtins.__xonsh_env__
86 term = env.get('TERM', None)
87 if term is None or term == 'linux':
88 return
89 if 'TITLE' in env:
90 t = env['TITLE']
91 else:
92 return
93 t = format_prompt(t)
94 if ON_WINDOWS and 'ANSICON' not in env:
95 t = escape_windows_title_string(t)
96 os.system('title {}'.format(t))
97 else:
98 sys.stdout.write("\x1b]2;{0}\x07".format(t))
99
100 @property
101 def prompt(self):
102 """Obtains the current prompt string."""
103 if self.need_more_lines:
104 if self.mlprompt is None:
105 self.mlprompt = multiline_prompt()
106 return self.mlprompt
107 env = builtins.__xonsh_env__
108 if 'PROMPT' in env:
109 p = env['PROMPT']
110 p = format_prompt(p)
111 else:
112 p = "set '$PROMPT = ...' $ "
113 self.settitle()
114 return p
115
116 def _print_exception():
117 """Print exceptions with/without traceback."""
118 if not 'XONSH_SHOW_TRACEBACK' in builtins.__xonsh_env__:
119 sys.stderr.write('xonsh: For full traceback set: '
120 '$XONSH_SHOW_TRACEBACK=True\n')
121 if builtins.__xonsh_env__.get('XONSH_SHOW_TRACEBACK', False):
122 traceback.print_exc()
123 else:
124 exc_type, exc_value, exc_traceback = sys.exc_info()
125 exception_only = traceback.format_exception_only(exc_type, exc_value)
126 sys.stderr.write(''.join(exception_only))
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/base_shell.py b/xonsh/base_shell.py
--- a/xonsh/base_shell.py
+++ b/xonsh/base_shell.py
@@ -102,12 +102,19 @@
"""Obtains the current prompt string."""
if self.need_more_lines:
if self.mlprompt is None:
- self.mlprompt = multiline_prompt()
+ try:
+ self.mlprompt = multiline_prompt()
+ except Exception:
+ _print_exception()
+ self.mlprompt = '<multiline prompt error> '
return self.mlprompt
env = builtins.__xonsh_env__
if 'PROMPT' in env:
p = env['PROMPT']
- p = format_prompt(p)
+ try:
+ p = format_prompt(p)
+ except Exception:
+ _print_exception()
else:
p = "set '$PROMPT = ...' $ "
self.settitle()
| {"golden_diff": "diff --git a/xonsh/base_shell.py b/xonsh/base_shell.py\n--- a/xonsh/base_shell.py\n+++ b/xonsh/base_shell.py\n@@ -102,12 +102,19 @@\n \"\"\"Obtains the current prompt string.\"\"\"\n if self.need_more_lines:\n if self.mlprompt is None:\n- self.mlprompt = multiline_prompt()\n+ try:\n+ self.mlprompt = multiline_prompt()\n+ except Exception:\n+ _print_exception()\n+ self.mlprompt = '<multiline prompt error> '\n return self.mlprompt\n env = builtins.__xonsh_env__\n if 'PROMPT' in env:\n p = env['PROMPT']\n- p = format_prompt(p)\n+ try:\n+ p = format_prompt(p)\n+ except Exception:\n+ _print_exception()\n else:\n p = \"set '$PROMPT = ...' $ \"\n self.settitle()\n", "issue": "xonsh dies if the prompt raises an exception\nIf a function in the prompt raises an exception, it kills xonsh. I would expect the error to be displayed, but not kill the shell. \n\n", "before_files": [{"content": "\"\"\"The base class for xonsh shell\"\"\"\nimport os\nimport sys\nimport builtins\nimport traceback\n\nfrom xonsh.execer import Execer\nfrom xonsh.tools import XonshError, escape_windows_title_string\nfrom xonsh.tools import ON_WINDOWS\nfrom xonsh.completer import Completer\nfrom xonsh.environ import multiline_prompt, format_prompt\n\n\nclass BaseShell(object):\n \"\"\"The xonsh shell.\"\"\"\n\n def __init__(self, execer, ctx, **kwargs):\n super().__init__(**kwargs)\n self.execer = execer\n self.ctx = ctx\n self.completer = Completer()\n self.buffer = []\n self.need_more_lines = False\n self.mlprompt = None\n\n def emptyline(self):\n \"\"\"Called when an empty line has been entered.\"\"\"\n self.need_more_lines = False\n self.default('')\n\n def precmd(self, line):\n \"\"\"Called just before execution of line.\"\"\"\n return line if self.need_more_lines else line.lstrip()\n\n def default(self, line):\n \"\"\"Implements code execution.\"\"\"\n line = line if line.endswith('\\n') else line + '\\n'\n code = self.push(line)\n if code is None:\n return\n try:\n self.execer.exec(code, mode='single', glbs=self.ctx) # no locals\n except XonshError as e:\n print(e.args[0], file=sys.stderr)\n except:\n _print_exception()\n if builtins.__xonsh_exit__:\n return True\n\n def push(self, line):\n \"\"\"Pushes a line onto the buffer and compiles the code in a way that\n enables multiline input.\n \"\"\"\n code = None\n self.buffer.append(line)\n if self.need_more_lines:\n return code\n src = ''.join(self.buffer)\n try:\n code = self.execer.compile(src,\n mode='single',\n glbs=None,\n locs=self.ctx)\n self.reset_buffer()\n except SyntaxError:\n if line == '\\n':\n self.reset_buffer()\n _print_exception()\n return None\n self.need_more_lines = True\n except:\n self.reset_buffer()\n _print_exception()\n return None\n return code\n\n def reset_buffer(self):\n \"\"\"Resets the line buffer.\"\"\"\n self.buffer.clear()\n self.need_more_lines = False\n self.mlprompt = None\n\n def settitle(self):\n \"\"\"Sets terminal title.\"\"\"\n env = builtins.__xonsh_env__\n term = env.get('TERM', None)\n if term is None or term == 'linux':\n return\n if 'TITLE' in env:\n t = env['TITLE']\n else:\n return\n t = format_prompt(t)\n if ON_WINDOWS and 'ANSICON' not in env:\n t = escape_windows_title_string(t)\n os.system('title {}'.format(t))\n else:\n sys.stdout.write(\"\\x1b]2;{0}\\x07\".format(t))\n\n @property\n def prompt(self):\n \"\"\"Obtains the current prompt string.\"\"\"\n if self.need_more_lines:\n if self.mlprompt is None:\n self.mlprompt = multiline_prompt()\n return self.mlprompt\n env = builtins.__xonsh_env__\n if 
'PROMPT' in env:\n p = env['PROMPT']\n p = format_prompt(p)\n else:\n p = \"set '$PROMPT = ...' $ \"\n self.settitle()\n return p\n \ndef _print_exception():\n \"\"\"Print exceptions with/without traceback.\"\"\"\n if not 'XONSH_SHOW_TRACEBACK' in builtins.__xonsh_env__:\n sys.stderr.write('xonsh: For full traceback set: '\n '$XONSH_SHOW_TRACEBACK=True\\n')\n if builtins.__xonsh_env__.get('XONSH_SHOW_TRACEBACK', False):\n traceback.print_exc()\n else:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n exception_only = traceback.format_exception_only(exc_type, exc_value)\n sys.stderr.write(''.join(exception_only))\n", "path": "xonsh/base_shell.py"}], "after_files": [{"content": "\"\"\"The base class for xonsh shell\"\"\"\nimport os\nimport sys\nimport builtins\nimport traceback\n\nfrom xonsh.execer import Execer\nfrom xonsh.tools import XonshError, escape_windows_title_string\nfrom xonsh.tools import ON_WINDOWS\nfrom xonsh.completer import Completer\nfrom xonsh.environ import multiline_prompt, format_prompt\n\n\nclass BaseShell(object):\n \"\"\"The xonsh shell.\"\"\"\n\n def __init__(self, execer, ctx, **kwargs):\n super().__init__(**kwargs)\n self.execer = execer\n self.ctx = ctx\n self.completer = Completer()\n self.buffer = []\n self.need_more_lines = False\n self.mlprompt = None\n\n def emptyline(self):\n \"\"\"Called when an empty line has been entered.\"\"\"\n self.need_more_lines = False\n self.default('')\n\n def precmd(self, line):\n \"\"\"Called just before execution of line.\"\"\"\n return line if self.need_more_lines else line.lstrip()\n\n def default(self, line):\n \"\"\"Implements code execution.\"\"\"\n line = line if line.endswith('\\n') else line + '\\n'\n code = self.push(line)\n if code is None:\n return\n try:\n self.execer.exec(code, mode='single', glbs=self.ctx) # no locals\n except XonshError as e:\n print(e.args[0], file=sys.stderr)\n except:\n _print_exception()\n if builtins.__xonsh_exit__:\n return True\n\n def push(self, line):\n \"\"\"Pushes a line onto the buffer and compiles the code in a way that\n enables multiline input.\n \"\"\"\n code = None\n self.buffer.append(line)\n if self.need_more_lines:\n return code\n src = ''.join(self.buffer)\n try:\n code = self.execer.compile(src,\n mode='single',\n glbs=None,\n locs=self.ctx)\n self.reset_buffer()\n except SyntaxError:\n if line == '\\n':\n self.reset_buffer()\n _print_exception()\n return None\n self.need_more_lines = True\n except:\n self.reset_buffer()\n _print_exception()\n return None\n return code\n\n def reset_buffer(self):\n \"\"\"Resets the line buffer.\"\"\"\n self.buffer.clear()\n self.need_more_lines = False\n self.mlprompt = None\n\n def settitle(self):\n \"\"\"Sets terminal title.\"\"\"\n env = builtins.__xonsh_env__\n term = env.get('TERM', None)\n if term is None or term == 'linux':\n return\n if 'TITLE' in env:\n t = env['TITLE']\n else:\n return\n t = format_prompt(t)\n if ON_WINDOWS and 'ANSICON' not in env:\n t = escape_windows_title_string(t)\n os.system('title {}'.format(t))\n else:\n sys.stdout.write(\"\\x1b]2;{0}\\x07\".format(t))\n\n @property\n def prompt(self):\n \"\"\"Obtains the current prompt string.\"\"\"\n if self.need_more_lines:\n if self.mlprompt is None:\n try:\n self.mlprompt = multiline_prompt()\n except Exception:\n _print_exception()\n self.mlprompt = '<multiline prompt error> '\n return self.mlprompt\n env = builtins.__xonsh_env__\n if 'PROMPT' in env:\n p = env['PROMPT']\n try:\n p = format_prompt(p)\n except Exception:\n _print_exception()\n else:\n p = \"set 
'$PROMPT = ...' $ \"\n self.settitle()\n return p\n \ndef _print_exception():\n \"\"\"Print exceptions with/without traceback.\"\"\"\n if not 'XONSH_SHOW_TRACEBACK' in builtins.__xonsh_env__:\n sys.stderr.write('xonsh: For full traceback set: '\n '$XONSH_SHOW_TRACEBACK=True\\n')\n if builtins.__xonsh_env__.get('XONSH_SHOW_TRACEBACK', False):\n traceback.print_exc()\n else:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n exception_only = traceback.format_exception_only(exc_type, exc_value)\n sys.stderr.write(''.join(exception_only))\n", "path": "xonsh/base_shell.py"}]} | 1,480 | 217 |
gh_patches_debug_36944 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-903 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Password reset after docker container restarted
*Copy from old repository*: https://github.com/jonaswinkler/paperless-ng/issues/1511
**Describe the bug**
I deployed Paperless-NG in TrueNAS via the TrueCharts integration. TrueCharts uses the official docker container and passes environment variables to configure the superuser.
I changed the admin password in the Django admin interface. However, after redeploying the application (for example due to an update) the password gets overridden by the initial password passed via environment variable.
**To Reproduce**
Steps to reproduce the behavior:
1. Deploy Paperless with credentials admin//secret
2. Open Paperless
3. Navigate to admin interface
4. Change password to "mysupersecretpassword"
5. Restart/update the docker container
6. Navigate to Paperless and try to login with admin/mysupersecretpassword
7. You can't login.
**Expected behavior**
The admin password should not be overridden by the initial password.
**Relevant information**
- Version
- Installation method: **docker**
- Any configuration changes you made in `docker-compose.yml`, `docker-compose.env` or `paperless.conf`. -
I think this is related to the admin user password reset when the docker container is started:
docker-entrypoint.sh calls docker-prepare.sh calls the manage_superuser mgmt command and there the password is updated:
https://github.com/jonaswinkler/paperless-ng/blob/master/src/documents/management/commands/manage_superuser.py#L29
Am I missing something?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/documents/management/commands/manage_superuser.py`
Content:
```
1 import logging
2 import os
3
4 from django.contrib.auth.models import User
5 from django.core.management.base import BaseCommand
6
7
8 logger = logging.getLogger("paperless.management.superuser")
9
10
11 class Command(BaseCommand):
12
13 help = """
14 Creates a Django superuser based on env variables.
15 """.replace(
16 " ",
17 "",
18 )
19
20 def handle(self, *args, **options):
21
22 username = os.getenv("PAPERLESS_ADMIN_USER")
23 if not username:
24 return
25
26 mail = os.getenv("PAPERLESS_ADMIN_MAIL", "root@localhost")
27 password = os.getenv("PAPERLESS_ADMIN_PASSWORD")
28
29 # Check if user exists already, leave as is if it does
30 if User.objects.filter(username=username).exists():
31 user: User = User.objects.get_by_natural_key(username)
32 user.set_password(password)
33 user.save()
34 self.stdout.write(f"Changed password of user {username}.")
35 elif password:
36 # Create superuser based on env variables
37 User.objects.create_superuser(username, mail, password)
38 self.stdout.write(f'Created superuser "{username}" with provided password.')
39 else:
40 self.stdout.write(f'Did not create superuser "{username}".')
41 self.stdout.write(
42 'Make sure you specified "PAPERLESS_ADMIN_PASSWORD" in your '
43 '"docker-compose.env" file.',
44 )
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/documents/management/commands/manage_superuser.py b/src/documents/management/commands/manage_superuser.py
--- a/src/documents/management/commands/manage_superuser.py
+++ b/src/documents/management/commands/manage_superuser.py
@@ -11,7 +11,14 @@
class Command(BaseCommand):
help = """
- Creates a Django superuser based on env variables.
+ Creates a Django superuser:
+ User named: admin
+ Email: root@localhost
+ with password based on env variable.
+ No superuser will be created, when:
+ - The username is taken already exists
+ - A superuser already exists
+ - PAPERLESS_ADMIN_PASSWORD is not set
""".replace(
" ",
"",
@@ -19,26 +26,41 @@
def handle(self, *args, **options):
- username = os.getenv("PAPERLESS_ADMIN_USER")
- if not username:
- return
-
+ username = os.getenv("PAPERLESS_ADMIN_USER", "admin")
mail = os.getenv("PAPERLESS_ADMIN_MAIL", "root@localhost")
password = os.getenv("PAPERLESS_ADMIN_PASSWORD")
- # Check if user exists already, leave as is if it does
+ # Check if there's already a user called admin
if User.objects.filter(username=username).exists():
- user: User = User.objects.get_by_natural_key(username)
- user.set_password(password)
- user.save()
- self.stdout.write(f"Changed password of user {username}.")
- elif password:
- # Create superuser based on env variables
- User.objects.create_superuser(username, mail, password)
- self.stdout.write(f'Created superuser "{username}" with provided password.')
+ self.stdout.write(
+ self.style.NOTICE(
+ f"Did not create superuser, a user {username} already exists",
+ ),
+ )
+ return
+
+ # Check if any superuseruser
+ # exists already, leave as is if it does
+ if User.objects.filter(is_superuser=True).count() > 0:
+ self.stdout.write(
+ self.style.NOTICE(
+ "Did not create superuser, the DB already contains superusers",
+ ),
+ )
+ return
+
+ if password is None:
+ self.stdout.write(
+ self.style.ERROR(
+ "Please check if PAPERLESS_ADMIN_PASSWORD has been"
+ " set in the environment",
+ ),
+ )
else:
- self.stdout.write(f'Did not create superuser "{username}".')
+ # Create superuser with password based on env variable
+ User.objects.create_superuser(username, mail, password)
self.stdout.write(
- 'Make sure you specified "PAPERLESS_ADMIN_PASSWORD" in your '
- '"docker-compose.env" file.',
+ self.style.SUCCESS(
+ f'Created superuser "{username}" with provided password.',
+ ),
)
| {"golden_diff": "diff --git a/src/documents/management/commands/manage_superuser.py b/src/documents/management/commands/manage_superuser.py\n--- a/src/documents/management/commands/manage_superuser.py\n+++ b/src/documents/management/commands/manage_superuser.py\n@@ -11,7 +11,14 @@\n class Command(BaseCommand):\n \n help = \"\"\"\n- Creates a Django superuser based on env variables.\n+ Creates a Django superuser:\n+ User named: admin\n+ Email: root@localhost\n+ with password based on env variable.\n+ No superuser will be created, when:\n+ - The username is taken already exists\n+ - A superuser already exists\n+ - PAPERLESS_ADMIN_PASSWORD is not set\n \"\"\".replace(\n \" \",\n \"\",\n@@ -19,26 +26,41 @@\n \n def handle(self, *args, **options):\n \n- username = os.getenv(\"PAPERLESS_ADMIN_USER\")\n- if not username:\n- return\n-\n+ username = os.getenv(\"PAPERLESS_ADMIN_USER\", \"admin\")\n mail = os.getenv(\"PAPERLESS_ADMIN_MAIL\", \"root@localhost\")\n password = os.getenv(\"PAPERLESS_ADMIN_PASSWORD\")\n \n- # Check if user exists already, leave as is if it does\n+ # Check if there's already a user called admin\n if User.objects.filter(username=username).exists():\n- user: User = User.objects.get_by_natural_key(username)\n- user.set_password(password)\n- user.save()\n- self.stdout.write(f\"Changed password of user {username}.\")\n- elif password:\n- # Create superuser based on env variables\n- User.objects.create_superuser(username, mail, password)\n- self.stdout.write(f'Created superuser \"{username}\" with provided password.')\n+ self.stdout.write(\n+ self.style.NOTICE(\n+ f\"Did not create superuser, a user {username} already exists\",\n+ ),\n+ )\n+ return\n+\n+ # Check if any superuseruser\n+ # exists already, leave as is if it does\n+ if User.objects.filter(is_superuser=True).count() > 0:\n+ self.stdout.write(\n+ self.style.NOTICE(\n+ \"Did not create superuser, the DB already contains superusers\",\n+ ),\n+ )\n+ return\n+\n+ if password is None:\n+ self.stdout.write(\n+ self.style.ERROR(\n+ \"Please check if PAPERLESS_ADMIN_PASSWORD has been\"\n+ \" set in the environment\",\n+ ),\n+ )\n else:\n- self.stdout.write(f'Did not create superuser \"{username}\".')\n+ # Create superuser with password based on env variable\n+ User.objects.create_superuser(username, mail, password)\n self.stdout.write(\n- 'Make sure you specified \"PAPERLESS_ADMIN_PASSWORD\" in your '\n- '\"docker-compose.env\" file.',\n+ self.style.SUCCESS(\n+ f'Created superuser \"{username}\" with provided password.',\n+ ),\n )\n", "issue": "[BUG] Password reset after docker container restarted\n*Copy from old repository*: https://github.com/jonaswinkler/paperless-ng/issues/1511\r\n\r\n**Describe the bug**\r\nI deployed Paperless-NG in TrueNAS via the TrueCharts integration. TrueCharts uses the official docker container and passes environment variables to configure the superuser.\r\n\r\nI changed the admin password in the Django admin interface. However, after redeploying the application (for example due to an update) the password gets overridden by the initial password passed via environment variable.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Deploy Paperless with credentials admin//secret\r\n2. Open Paperless\r\n3. Navigate to admin interface\r\n4. Change password to \"mysupersecretpassword\"\r\n5. Restart/update the docker container\r\n6. Navigate to Paperless and try to login with admin/mysupersecretpassword\r\n7. 
You can't login.\r\n\r\n**Expected behavior**\r\nThe admin password should not be overridden by the initial password.\r\n\r\n**Relevant information**\r\n - Version \r\n - Installation method: **docker**\r\n - Any configuration changes you made in `docker-compose.yml`, `docker-compose.env` or `paperless.conf`. -\r\n\r\nI think this is related to the admin user password reset when the docker container is started:\r\ndocker-entrypoint.sh calls docker-prepare.sh calls the manage_superuser mgmt command and there the password is updated:\r\nhttps://github.com/jonaswinkler/paperless-ng/blob/master/src/documents/management/commands/manage_superuser.py#L29\r\n\r\nAm I missing something?\n", "before_files": [{"content": "import logging\nimport os\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\n\n\nlogger = logging.getLogger(\"paperless.management.superuser\")\n\n\nclass Command(BaseCommand):\n\n help = \"\"\"\n Creates a Django superuser based on env variables.\n \"\"\".replace(\n \" \",\n \"\",\n )\n\n def handle(self, *args, **options):\n\n username = os.getenv(\"PAPERLESS_ADMIN_USER\")\n if not username:\n return\n\n mail = os.getenv(\"PAPERLESS_ADMIN_MAIL\", \"root@localhost\")\n password = os.getenv(\"PAPERLESS_ADMIN_PASSWORD\")\n\n # Check if user exists already, leave as is if it does\n if User.objects.filter(username=username).exists():\n user: User = User.objects.get_by_natural_key(username)\n user.set_password(password)\n user.save()\n self.stdout.write(f\"Changed password of user {username}.\")\n elif password:\n # Create superuser based on env variables\n User.objects.create_superuser(username, mail, password)\n self.stdout.write(f'Created superuser \"{username}\" with provided password.')\n else:\n self.stdout.write(f'Did not create superuser \"{username}\".')\n self.stdout.write(\n 'Make sure you specified \"PAPERLESS_ADMIN_PASSWORD\" in your '\n '\"docker-compose.env\" file.',\n )\n", "path": "src/documents/management/commands/manage_superuser.py"}], "after_files": [{"content": "import logging\nimport os\n\nfrom django.contrib.auth.models import User\nfrom django.core.management.base import BaseCommand\n\n\nlogger = logging.getLogger(\"paperless.management.superuser\")\n\n\nclass Command(BaseCommand):\n\n help = \"\"\"\n Creates a Django superuser:\n User named: admin\n Email: root@localhost\n with password based on env variable.\n No superuser will be created, when:\n - The username is taken already exists\n - A superuser already exists\n - PAPERLESS_ADMIN_PASSWORD is not set\n \"\"\".replace(\n \" \",\n \"\",\n )\n\n def handle(self, *args, **options):\n\n username = os.getenv(\"PAPERLESS_ADMIN_USER\", \"admin\")\n mail = os.getenv(\"PAPERLESS_ADMIN_MAIL\", \"root@localhost\")\n password = os.getenv(\"PAPERLESS_ADMIN_PASSWORD\")\n\n # Check if there's already a user called admin\n if User.objects.filter(username=username).exists():\n self.stdout.write(\n self.style.NOTICE(\n f\"Did not create superuser, a user {username} already exists\",\n ),\n )\n return\n\n # Check if any superuseruser\n # exists already, leave as is if it does\n if User.objects.filter(is_superuser=True).count() > 0:\n self.stdout.write(\n self.style.NOTICE(\n \"Did not create superuser, the DB already contains superusers\",\n ),\n )\n return\n\n if password is None:\n self.stdout.write(\n self.style.ERROR(\n \"Please check if PAPERLESS_ADMIN_PASSWORD has been\"\n \" set in the environment\",\n ),\n )\n else:\n # Create superuser with password based on env 
variable\n User.objects.create_superuser(username, mail, password)\n self.stdout.write(\n self.style.SUCCESS(\n f'Created superuser \"{username}\" with provided password.',\n ),\n )\n", "path": "src/documents/management/commands/manage_superuser.py"}]} | 960 | 658 |
gh_patches_debug_9140 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1155 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
notation typo in Cosine Similarity docs
## 📚 Documentation
There is a typo in the notation for the [pairwise_cosine_similarity](https://torchmetrics.readthedocs.io/en/stable/pairwise/cosine_similarity.html)

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/functional/pairwise/cosine.py`
Content:
```
1 # Copyright The PyTorch Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Optional
15
16 import torch
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix
21 from torchmetrics.utilities.compute import _safe_matmul
22
23
24 def _pairwise_cosine_similarity_update(
25 x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None
26 ) -> Tensor:
27 """Calculates the pairwise cosine similarity matrix.
28
29 Args:
30 x: tensor of shape ``[N,d]``
31 y: tensor of shape ``[M,d]``
32 zero_diagonal: determines if the diagonal of the distance matrix should be set to zero
33 """
34 x, y, zero_diagonal = _check_input(x, y, zero_diagonal)
35
36 norm = torch.norm(x, p=2, dim=1)
37 x /= norm.unsqueeze(1)
38 norm = torch.norm(y, p=2, dim=1)
39 y /= norm.unsqueeze(1)
40
41 distance = _safe_matmul(x, y)
42 if zero_diagonal:
43 distance.fill_diagonal_(0)
44 return distance
45
46
47 def pairwise_cosine_similarity(
48 x: Tensor,
49 y: Optional[Tensor] = None,
50 reduction: Literal["mean", "sum", "none", None] = None,
51 zero_diagonal: Optional[bool] = None,
52 ) -> Tensor:
53 r"""Calculates pairwise cosine similarity:
54
55 .. math::
56 s_{cos}(x,y) = \frac{<x,y>}{||x|| \cdot ||y||}
57 = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D x_i^2}}
58
59 If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise
60 between the rows of :math:`x` and :math:`y`.
61 If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.
62
63 Args:
64 x: Tensor with shape ``[N, d]``
65 y: Tensor with shape ``[M, d]``, optional
66 reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`
67 (applied along column dimension) or `'none'`, `None` for no reduction
68 zero_diagonal: if the diagonal of the distance matrix should be set to 0. If only :math:`x` is given
69 this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``
70
71 Returns:
72 A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix
73
74 Example:
75 >>> import torch
76 >>> from torchmetrics.functional import pairwise_cosine_similarity
77 >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)
78 >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)
79 >>> pairwise_cosine_similarity(x, y)
80 tensor([[0.5547, 0.8682],
81 [0.5145, 0.8437],
82 [0.5300, 0.8533]])
83 >>> pairwise_cosine_similarity(x)
84 tensor([[0.0000, 0.9989, 0.9996],
85 [0.9989, 0.0000, 0.9998],
86 [0.9996, 0.9998, 0.0000]])
87
88 """
89 distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)
90 return _reduce_distance_matrix(distance, reduction)
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/torchmetrics/functional/pairwise/cosine.py b/src/torchmetrics/functional/pairwise/cosine.py
--- a/src/torchmetrics/functional/pairwise/cosine.py
+++ b/src/torchmetrics/functional/pairwise/cosine.py
@@ -54,7 +54,7 @@
.. math::
s_{cos}(x,y) = \frac{<x,y>}{||x|| \cdot ||y||}
- = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D x_i^2}}
+ = \frac{\sum_{d=1}^D x_d \cdot y_d }{\sqrt{\sum_{d=1}^D x_i^2} \cdot \sqrt{\sum_{d=1}^D y_i^2}}
If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise
between the rows of :math:`x` and :math:`y`.
| {"golden_diff": "diff --git a/src/torchmetrics/functional/pairwise/cosine.py b/src/torchmetrics/functional/pairwise/cosine.py\n--- a/src/torchmetrics/functional/pairwise/cosine.py\n+++ b/src/torchmetrics/functional/pairwise/cosine.py\n@@ -54,7 +54,7 @@\n \n .. math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n- = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D x_i^2}}\n+ = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D y_i^2}}\n \n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n", "issue": "notation typo in Cosine Similarity docs \n## \ud83d\udcda Documentation\r\n\r\nThere is a typo in the notation for the [pairwise_cosine_similarity](https://torchmetrics.readthedocs.io/en/stable/pairwise/cosine_similarity.html)\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\nfrom torchmetrics.utilities.compute import _safe_matmul\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x /= norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n\n distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D x_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. 
If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "src/torchmetrics/functional/pairwise/cosine.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.functional.pairwise.helpers import _check_input, _reduce_distance_matrix\nfrom torchmetrics.utilities.compute import _safe_matmul\n\n\ndef _pairwise_cosine_similarity_update(\n x: Tensor, y: Optional[Tensor] = None, zero_diagonal: Optional[bool] = None\n) -> Tensor:\n \"\"\"Calculates the pairwise cosine similarity matrix.\n\n Args:\n x: tensor of shape ``[N,d]``\n y: tensor of shape ``[M,d]``\n zero_diagonal: determines if the diagonal of the distance matrix should be set to zero\n \"\"\"\n x, y, zero_diagonal = _check_input(x, y, zero_diagonal)\n\n norm = torch.norm(x, p=2, dim=1)\n x /= norm.unsqueeze(1)\n norm = torch.norm(y, p=2, dim=1)\n y /= norm.unsqueeze(1)\n\n distance = _safe_matmul(x, y)\n if zero_diagonal:\n distance.fill_diagonal_(0)\n return distance\n\n\ndef pairwise_cosine_similarity(\n x: Tensor,\n y: Optional[Tensor] = None,\n reduction: Literal[\"mean\", \"sum\", \"none\", None] = None,\n zero_diagonal: Optional[bool] = None,\n) -> Tensor:\n r\"\"\"Calculates pairwise cosine similarity:\n\n .. math::\n s_{cos}(x,y) = \\frac{<x,y>}{||x|| \\cdot ||y||}\n = \\frac{\\sum_{d=1}^D x_d \\cdot y_d }{\\sqrt{\\sum_{d=1}^D x_i^2} \\cdot \\sqrt{\\sum_{d=1}^D y_i^2}}\n\n If both :math:`x` and :math:`y` are passed in, the calculation will be performed pairwise\n between the rows of :math:`x` and :math:`y`.\n If only :math:`x` is passed in, the calculation will be performed between the rows of :math:`x`.\n\n Args:\n x: Tensor with shape ``[N, d]``\n y: Tensor with shape ``[M, d]``, optional\n reduction: reduction to apply along the last dimension. Choose between `'mean'`, `'sum'`\n (applied along column dimension) or `'none'`, `None` for no reduction\n zero_diagonal: if the diagonal of the distance matrix should be set to 0. 
If only :math:`x` is given\n this defaults to ``True`` else if :math:`y` is also given it defaults to ``False``\n\n Returns:\n A ``[N,N]`` matrix of distances if only ``x`` is given, else a ``[N,M]`` matrix\n\n Example:\n >>> import torch\n >>> from torchmetrics.functional import pairwise_cosine_similarity\n >>> x = torch.tensor([[2, 3], [3, 5], [5, 8]], dtype=torch.float32)\n >>> y = torch.tensor([[1, 0], [2, 1]], dtype=torch.float32)\n >>> pairwise_cosine_similarity(x, y)\n tensor([[0.5547, 0.8682],\n [0.5145, 0.8437],\n [0.5300, 0.8533]])\n >>> pairwise_cosine_similarity(x)\n tensor([[0.0000, 0.9989, 0.9996],\n [0.9989, 0.0000, 0.9998],\n [0.9996, 0.9998, 0.0000]])\n\n \"\"\"\n distance = _pairwise_cosine_similarity_update(x, y, zero_diagonal)\n return _reduce_distance_matrix(distance, reduction)\n", "path": "src/torchmetrics/functional/pairwise/cosine.py"}]} | 1,564 | 259 |
gh_patches_debug_27323 | rasdani/github-patches | git_diff | mindsdb__lightwood-168 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Construct comperhensive test suite to evaluate predictions with missing column
We should have a test suite to evaluate prediction accuracy with missing column.
This should take the form of:
Given `M` columns and a Lightwood model trained with them to predict `y`, the accuracy for `y` when predicting with `M` columns (where `M` is a subset of `N`), should be about equal to or greater than that of a Gradient Boosting Regressor or Classifier trained with just the columns `M` to predict `y`.
The reason we are using a Gradient Booster to determine the benchmark accuracy is that it's safe to assume they are fairly generic (i.e. should get about the same accuracy as a well trained neural network) and fast&easy to train.
We can do this testing in two phases:
First, we can add this as a check to the generate-data tests in lightwood, which should be fairly easy.
Second, we can add these tests to mindsdb_examples, the helpers that are already present in there can help.
I'll be handling this but @torrmal feel free to review the methodology
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/examples/learn_to_classify.py`
Content:
```
1 import lightwood
2 import random
3 import pandas as pd
4 import numpy as np
5 from collections import Counter
6
7
8 random.seed(66)
9 n = 100
10 m = 500
11 train = True
12 nr_inputs = 10
13
14 #options = ['a','b','c','d','e','f','g','h','n','m']
15 options = ['a','b','c']
16
17 data_train = {}
18 data_test = {}
19
20 for data, nr_ele in [(data_train,n), (data_test,m)]:
21 for i in range(nr_inputs):
22 data[f'x_{i}'] = [random.choice(options) for _ in range(nr_ele)]
23
24 data['y'] = [Counter([data[f'x_{i}'][n] for i in range(nr_inputs)]).most_common(1)[0][0] for n in range(nr_ele)]
25
26 data_train = pd.DataFrame(data_train)
27 data_test = pd.DataFrame(data_test)
28
29 def iter_function(epoch, training_error, test_error, test_error_gradient, test_accuracy):
30 print(f'Epoch: {epoch}, Train Error: {training_error}, Test Error: {test_error}, Test Error Gradient: {test_error_gradient}, Test Accuracy: {test_accuracy}')
31
32 if train:
33 predictor = lightwood.Predictor(output=['y'])
34 predictor.learn(from_data=data_train, callback_on_iter=iter_function, eval_every_x_epochs=200)
35 predictor.save('/tmp/ltcrl.pkl')
36
37 predictor = lightwood.Predictor(load_from_path='/tmp/ltcrl.pkl')
38 print('Train accuracy: ', predictor.train_accuracy['y']['value'])
39 print('Test accuracy: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])
40
41 predictions = predictor.predict(when_data=data_test)
42 print(f'Confidence mean for all columns present ', np.mean(predictions['y']['selfaware_confidences']))
43
44 for i_drop in range(nr_inputs):
45 predictions = predictor.predict(when_data=data_test.drop(columns=[f'x_{i_drop}']))
46 print(f'Accuracy for x_{i_drop} missing: ', predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))['y']['value'])
47 print(f'Confidence mean for x_{i_drop} missing: ', np.mean(predictions['y']['selfaware_confidences']))
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/examples/learn_to_classify.py b/docs/examples/learn_to_classify.py
--- a/docs/examples/learn_to_classify.py
+++ b/docs/examples/learn_to_classify.py
@@ -34,14 +34,18 @@
predictor.learn(from_data=data_train, callback_on_iter=iter_function, eval_every_x_epochs=200)
predictor.save('/tmp/ltcrl.pkl')
+
predictor = lightwood.Predictor(load_from_path='/tmp/ltcrl.pkl')
print('Train accuracy: ', predictor.train_accuracy['y']['value'])
print('Test accuracy: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])
-predictions = predictor.predict(when_data=data_test)
+print(f'Accuracy for all columns present: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])
+
+predictions = predictor.calculate_accuracy(from_data=data_test)
print(f'Confidence mean for all columns present ', np.mean(predictions['y']['selfaware_confidences']))
for i_drop in range(nr_inputs):
- predictions = predictor.predict(when_data=data_test.drop(columns=[f'x_{i_drop}']))
print(f'Accuracy for x_{i_drop} missing: ', predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))['y']['value'])
+
+ predictions = predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))
print(f'Confidence mean for x_{i_drop} missing: ', np.mean(predictions['y']['selfaware_confidences']))
| {"golden_diff": "diff --git a/docs/examples/learn_to_classify.py b/docs/examples/learn_to_classify.py\n--- a/docs/examples/learn_to_classify.py\n+++ b/docs/examples/learn_to_classify.py\n@@ -34,14 +34,18 @@\n predictor.learn(from_data=data_train, callback_on_iter=iter_function, eval_every_x_epochs=200)\n predictor.save('/tmp/ltcrl.pkl')\n \n+\n predictor = lightwood.Predictor(load_from_path='/tmp/ltcrl.pkl')\n print('Train accuracy: ', predictor.train_accuracy['y']['value'])\n print('Test accuracy: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])\n \n-predictions = predictor.predict(when_data=data_test)\n+print(f'Accuracy for all columns present: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])\n+\n+predictions = predictor.calculate_accuracy(from_data=data_test)\n print(f'Confidence mean for all columns present ', np.mean(predictions['y']['selfaware_confidences']))\n \n for i_drop in range(nr_inputs):\n- predictions = predictor.predict(when_data=data_test.drop(columns=[f'x_{i_drop}']))\n print(f'Accuracy for x_{i_drop} missing: ', predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))['y']['value'])\n+\n+ predictions = predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))\n print(f'Confidence mean for x_{i_drop} missing: ', np.mean(predictions['y']['selfaware_confidences']))\n", "issue": "Construct comperhensive test suite to evaluate predictions with missing column\nWe should have a test suite to evaluate prediction accuracy with missing column.\r\n\r\nThis should take the form of:\r\n\r\nGiven `M` columns and a Lightwood model trained with them to predict `y`, the accuracy for `y` when predicting with `M` columns (where `M` is a subset of `N`), should be about equal to or greater than that of a Gradient Boosting Regressor or Classifier trained with just the columns `M` to predict `y`.\r\n\r\nThe reason we are using a Gradient Booster to determine the benchmark accuracy is that it's safe to assume they are fairly generic (i.e. 
should get about the same accuracy as a well trained neural network) and fast&easy to train.\r\n\r\nWe can do this testing in two phases:\r\n\r\nFirst, we can add this as a check to the generate-data tests in lightwood, which should be fairly easy.\r\n\r\nSecond, we can add these tests to mindsdb_examples, the helpers that are already present in there can help.\r\n\r\nI'll be handling this but @torrmal feel free to review the methodology\n", "before_files": [{"content": "import lightwood\nimport random\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\n\n\nrandom.seed(66)\nn = 100\nm = 500\ntrain = True\nnr_inputs = 10\n\n#options = ['a','b','c','d','e','f','g','h','n','m']\noptions = ['a','b','c']\n\ndata_train = {}\ndata_test = {}\n\nfor data, nr_ele in [(data_train,n), (data_test,m)]:\n for i in range(nr_inputs):\n data[f'x_{i}'] = [random.choice(options) for _ in range(nr_ele)]\n\n data['y'] = [Counter([data[f'x_{i}'][n] for i in range(nr_inputs)]).most_common(1)[0][0] for n in range(nr_ele)]\n\ndata_train = pd.DataFrame(data_train)\ndata_test = pd.DataFrame(data_test)\n\ndef iter_function(epoch, training_error, test_error, test_error_gradient, test_accuracy):\n print(f'Epoch: {epoch}, Train Error: {training_error}, Test Error: {test_error}, Test Error Gradient: {test_error_gradient}, Test Accuracy: {test_accuracy}')\n\nif train:\n predictor = lightwood.Predictor(output=['y'])\n predictor.learn(from_data=data_train, callback_on_iter=iter_function, eval_every_x_epochs=200)\n predictor.save('/tmp/ltcrl.pkl')\n\npredictor = lightwood.Predictor(load_from_path='/tmp/ltcrl.pkl')\nprint('Train accuracy: ', predictor.train_accuracy['y']['value'])\nprint('Test accuracy: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])\n\npredictions = predictor.predict(when_data=data_test)\nprint(f'Confidence mean for all columns present ', np.mean(predictions['y']['selfaware_confidences']))\n\nfor i_drop in range(nr_inputs):\n predictions = predictor.predict(when_data=data_test.drop(columns=[f'x_{i_drop}']))\n print(f'Accuracy for x_{i_drop} missing: ', predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))['y']['value'])\n print(f'Confidence mean for x_{i_drop} missing: ', np.mean(predictions['y']['selfaware_confidences']))\n", "path": "docs/examples/learn_to_classify.py"}], "after_files": [{"content": "import lightwood\nimport random\nimport pandas as pd\nimport numpy as np\nfrom collections import Counter\n\n\nrandom.seed(66)\nn = 100\nm = 500\ntrain = True\nnr_inputs = 10\n\n#options = ['a','b','c','d','e','f','g','h','n','m']\noptions = ['a','b','c']\n\ndata_train = {}\ndata_test = {}\n\nfor data, nr_ele in [(data_train,n), (data_test,m)]:\n for i in range(nr_inputs):\n data[f'x_{i}'] = [random.choice(options) for _ in range(nr_ele)]\n\n data['y'] = [Counter([data[f'x_{i}'][n] for i in range(nr_inputs)]).most_common(1)[0][0] for n in range(nr_ele)]\n\ndata_train = pd.DataFrame(data_train)\ndata_test = pd.DataFrame(data_test)\n\ndef iter_function(epoch, training_error, test_error, test_error_gradient, test_accuracy):\n print(f'Epoch: {epoch}, Train Error: {training_error}, Test Error: {test_error}, Test Error Gradient: {test_error_gradient}, Test Accuracy: {test_accuracy}')\n\nif train:\n predictor = lightwood.Predictor(output=['y'])\n predictor.learn(from_data=data_train, callback_on_iter=iter_function, eval_every_x_epochs=200)\n predictor.save('/tmp/ltcrl.pkl')\n\n\npredictor = 
lightwood.Predictor(load_from_path='/tmp/ltcrl.pkl')\nprint('Train accuracy: ', predictor.train_accuracy['y']['value'])\nprint('Test accuracy: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])\n\nprint(f'Accuracy for all columns present: ', predictor.calculate_accuracy(from_data=data_test)['y']['value'])\n\npredictions = predictor.calculate_accuracy(from_data=data_test)\nprint(f'Confidence mean for all columns present ', np.mean(predictions['y']['selfaware_confidences']))\n\nfor i_drop in range(nr_inputs):\n print(f'Accuracy for x_{i_drop} missing: ', predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))['y']['value'])\n\n predictions = predictor.calculate_accuracy(from_data=data_test.drop(columns=[f'x_{i_drop}']))\n print(f'Confidence mean for x_{i_drop} missing: ', np.mean(predictions['y']['selfaware_confidences']))\n", "path": "docs/examples/learn_to_classify.py"}]} | 1,076 | 333 |
gh_patches_debug_35750 | rasdani/github-patches | git_diff | chainer__chainer-1663 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Test N-dimensional convolution link for dtypes of FP16 and FP64
Follows #1279 and #1556.
Since #1295 is now merged to master, we can add test for dtypes of FP16 and FP64 to N-dimensional convolution **LINK**.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/links/connection/convolution_nd.py`
Content:
```
1 from chainer.functions.connection import convolution_nd
2 from chainer import initializers
3 from chainer import link
4 from chainer.utils import conv_nd
5
6
7 class ConvolutionND(link.Link):
8 """N-dimensional convolution layer.
9
10 This link wraps the :func:`~chainer.functions.convolution_nd` function and
11 holds the filter weight and bias vector as parameters.
12
13 Args:
14 ndim (int): Number of spatial dimensions.
15 in_channels (int): Number of channels of input arrays.
16 out_channels (int): Number of channels of output arrays.
17 ksize (int or tuple of ints): Size of filters (a.k.a. kernels).
18 ``ksize=k`` and ``ksize=(k, k, ..., k)`` are equivalent.
19 stride (int or tuple of ints): Stride of filter application.
20 ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.
21 pad (int or tuple of ints): Spatial padding width for input arrays.
22 ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
23 initialW: Value used to initialize the filter weight. May be an
24 initializer instance or another value that
25 :func:`~chainer.init_weight` helper function can take. This link
26 uses :func:`~chainer.init_weight` to initialize the filter weight
27 and passes the value of ``initialW`` to it as it is.
28 initial_bias: Value used to initialize the bias vector. May be an
29 initializer instance or another value except ``None`` that
30 :func:`~chainer.init_weight` helper function can take. If ``None``
31 is given, this link does not use the bias vector. This link uses
32 :func:`~chainer.init_weight` to initialize the bias vector and
33 passes the value of ``initial_bias`` other than ``None`` to it as
34 it is.
35 use_cudnn (bool): If ``True``, then this link uses cuDNN if available.
36 See :func:`~chainer.functions.convolution_nd` for exact conditions
37 of cuDNN availability.
38 cover_all (bool): If ``True``, all spatial locations are convoluted
39 into some output pixels. It may make the output size larger.
40 ``cover_all`` needs to be ``False`` if you want to use cuDNN.
41
42 .. seealso::
43 See :func:`~chainer.functions.convolution_nd` for the definition of
44 N-dimensional convolution. See
45 :func:`~chainer.functions.convolution_2d` for the definition of
46 two-dimensional convolution.
47
48 Attributes:
49 W (~chainer.Variable): Weight parameter.
50 b (~chainer.Variable): Bias parameter. If ``initial_bias`` is ``None``,
51 set to ``None``.
52
53 """
54
55 def __init__(self, ndim, in_channels, out_channels, ksize, stride=1, pad=0,
56 initialW=None, initial_bias=None, use_cudnn=True,
57 cover_all=False):
58 ksize = conv_nd.as_tuple(ksize, ndim)
59 self.stride = stride
60 self.pad = pad
61 self.use_cudnn = use_cudnn
62 self.cover_all = cover_all
63
64 W_shape = (out_channels, in_channels) + ksize
65 super(ConvolutionND, self).__init__(W=W_shape)
66 initializers.init_weight(self.W.data, initialW)
67
68 if initial_bias is None:
69 self.b = None
70 else:
71 self.add_param('b', out_channels)
72 initializers.init_weight(self.b.data, initial_bias)
73
74 def __call__(self, x):
75 """Applies N-dimensional convolution layer.
76
77 Args:
78 x (~chainer.Variable): Input image.
79
80 Returns:
81 ~chainer.Variable: Output of convolution.
82
83 """
84 return convolution_nd.convolution_nd(
85 x, self.W, self.b, self.stride, self.pad,
86 use_cudnn=self.use_cudnn, cover_all=self.cover_all)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/links/connection/convolution_nd.py b/chainer/links/connection/convolution_nd.py
--- a/chainer/links/connection/convolution_nd.py
+++ b/chainer/links/connection/convolution_nd.py
@@ -22,16 +22,11 @@
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
initialW: Value used to initialize the filter weight. May be an
initializer instance or another value that
- :func:`~chainer.init_weight` helper function can take. This link
- uses :func:`~chainer.init_weight` to initialize the filter weight
- and passes the value of ``initialW`` to it as it is.
+ :func:`~chainer.init_weight` helper function can take.
initial_bias: Value used to initialize the bias vector. May be an
initializer instance or another value except ``None`` that
:func:`~chainer.init_weight` helper function can take. If ``None``
- is given, this link does not use the bias vector. This link uses
- :func:`~chainer.init_weight` to initialize the bias vector and
- passes the value of ``initial_bias`` other than ``None`` to it as
- it is.
+ is given, this link does not use the bias vector.
use_cudnn (bool): If ``True``, then this link uses cuDNN if available.
See :func:`~chainer.functions.convolution_nd` for exact conditions
of cuDNN availability.
@@ -61,15 +56,17 @@
self.use_cudnn = use_cudnn
self.cover_all = cover_all
+ super(ConvolutionND, self).__init__()
+
W_shape = (out_channels, in_channels) + ksize
- super(ConvolutionND, self).__init__(W=W_shape)
- initializers.init_weight(self.W.data, initialW)
+ initialW = initializers._get_initializer(initialW)
+ self.add_param('W', W_shape, initializer=initialW)
if initial_bias is None:
self.b = None
else:
- self.add_param('b', out_channels)
- initializers.init_weight(self.b.data, initial_bias)
+ initial_bias = initializers._get_initializer(initial_bias)
+ self.add_param('b', out_channels, initializer=initial_bias)
def __call__(self, x):
"""Applies N-dimensional convolution layer.
| {"golden_diff": "diff --git a/chainer/links/connection/convolution_nd.py b/chainer/links/connection/convolution_nd.py\n--- a/chainer/links/connection/convolution_nd.py\n+++ b/chainer/links/connection/convolution_nd.py\n@@ -22,16 +22,11 @@\n ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.\n initialW: Value used to initialize the filter weight. May be an\n initializer instance or another value that\n- :func:`~chainer.init_weight` helper function can take. This link\n- uses :func:`~chainer.init_weight` to initialize the filter weight\n- and passes the value of ``initialW`` to it as it is.\n+ :func:`~chainer.init_weight` helper function can take.\n initial_bias: Value used to initialize the bias vector. May be an\n initializer instance or another value except ``None`` that\n :func:`~chainer.init_weight` helper function can take. If ``None``\n- is given, this link does not use the bias vector. This link uses\n- :func:`~chainer.init_weight` to initialize the bias vector and\n- passes the value of ``initial_bias`` other than ``None`` to it as\n- it is.\n+ is given, this link does not use the bias vector.\n use_cudnn (bool): If ``True``, then this link uses cuDNN if available.\n See :func:`~chainer.functions.convolution_nd` for exact conditions\n of cuDNN availability.\n@@ -61,15 +56,17 @@\n self.use_cudnn = use_cudnn\n self.cover_all = cover_all\n \n+ super(ConvolutionND, self).__init__()\n+\n W_shape = (out_channels, in_channels) + ksize\n- super(ConvolutionND, self).__init__(W=W_shape)\n- initializers.init_weight(self.W.data, initialW)\n+ initialW = initializers._get_initializer(initialW)\n+ self.add_param('W', W_shape, initializer=initialW)\n \n if initial_bias is None:\n self.b = None\n else:\n- self.add_param('b', out_channels)\n- initializers.init_weight(self.b.data, initial_bias)\n+ initial_bias = initializers._get_initializer(initial_bias)\n+ self.add_param('b', out_channels, initializer=initial_bias)\n \n def __call__(self, x):\n \"\"\"Applies N-dimensional convolution layer.\n", "issue": "Test N-dimensional convolution link for dtypes of FP16 and FP64\nFollows #1279 and #1556.\n\nSince #1295 is now merged to master, we can add test for dtypes of FP16 and FP64 to N-dimensional convolution **LINK**.\n\n", "before_files": [{"content": "from chainer.functions.connection import convolution_nd\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer.utils import conv_nd\n\n\nclass ConvolutionND(link.Link):\n \"\"\"N-dimensional convolution layer.\n\n This link wraps the :func:`~chainer.functions.convolution_nd` function and\n holds the filter weight and bias vector as parameters.\n\n Args:\n ndim (int): Number of spatial dimensions.\n in_channels (int): Number of channels of input arrays.\n out_channels (int): Number of channels of output arrays.\n ksize (int or tuple of ints): Size of filters (a.k.a. kernels).\n ``ksize=k`` and ``ksize=(k, k, ..., k)`` are equivalent.\n stride (int or tuple of ints): Stride of filter application.\n ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.\n pad (int or tuple of ints): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.\n initialW: Value used to initialize the filter weight. May be an\n initializer instance or another value that\n :func:`~chainer.init_weight` helper function can take. This link\n uses :func:`~chainer.init_weight` to initialize the filter weight\n and passes the value of ``initialW`` to it as it is.\n initial_bias: Value used to initialize the bias vector. 
May be an\n initializer instance or another value except ``None`` that\n :func:`~chainer.init_weight` helper function can take. If ``None``\n is given, this link does not use the bias vector. This link uses\n :func:`~chainer.init_weight` to initialize the bias vector and\n passes the value of ``initial_bias`` other than ``None`` to it as\n it is.\n use_cudnn (bool): If ``True``, then this link uses cuDNN if available.\n See :func:`~chainer.functions.convolution_nd` for exact conditions\n of cuDNN availability.\n cover_all (bool): If ``True``, all spatial locations are convoluted\n into some output pixels. It may make the output size larger.\n ``cover_all`` needs to be ``False`` if you want to use cuDNN.\n\n .. seealso::\n See :func:`~chainer.functions.convolution_nd` for the definition of\n N-dimensional convolution. See\n :func:`~chainer.functions.convolution_2d` for the definition of\n two-dimensional convolution.\n\n Attributes:\n W (~chainer.Variable): Weight parameter.\n b (~chainer.Variable): Bias parameter. If ``initial_bias`` is ``None``,\n set to ``None``.\n\n \"\"\"\n\n def __init__(self, ndim, in_channels, out_channels, ksize, stride=1, pad=0,\n initialW=None, initial_bias=None, use_cudnn=True,\n cover_all=False):\n ksize = conv_nd.as_tuple(ksize, ndim)\n self.stride = stride\n self.pad = pad\n self.use_cudnn = use_cudnn\n self.cover_all = cover_all\n\n W_shape = (out_channels, in_channels) + ksize\n super(ConvolutionND, self).__init__(W=W_shape)\n initializers.init_weight(self.W.data, initialW)\n\n if initial_bias is None:\n self.b = None\n else:\n self.add_param('b', out_channels)\n initializers.init_weight(self.b.data, initial_bias)\n\n def __call__(self, x):\n \"\"\"Applies N-dimensional convolution layer.\n\n Args:\n x (~chainer.Variable): Input image.\n\n Returns:\n ~chainer.Variable: Output of convolution.\n\n \"\"\"\n return convolution_nd.convolution_nd(\n x, self.W, self.b, self.stride, self.pad,\n use_cudnn=self.use_cudnn, cover_all=self.cover_all)\n", "path": "chainer/links/connection/convolution_nd.py"}], "after_files": [{"content": "from chainer.functions.connection import convolution_nd\nfrom chainer import initializers\nfrom chainer import link\nfrom chainer.utils import conv_nd\n\n\nclass ConvolutionND(link.Link):\n \"\"\"N-dimensional convolution layer.\n\n This link wraps the :func:`~chainer.functions.convolution_nd` function and\n holds the filter weight and bias vector as parameters.\n\n Args:\n ndim (int): Number of spatial dimensions.\n in_channels (int): Number of channels of input arrays.\n out_channels (int): Number of channels of output arrays.\n ksize (int or tuple of ints): Size of filters (a.k.a. kernels).\n ``ksize=k`` and ``ksize=(k, k, ..., k)`` are equivalent.\n stride (int or tuple of ints): Stride of filter application.\n ``stride=s`` and ``stride=(s, s, ..., s)`` are equivalent.\n pad (int or tuple of ints): Spatial padding width for input arrays.\n ``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.\n initialW: Value used to initialize the filter weight. May be an\n initializer instance or another value that\n :func:`~chainer.init_weight` helper function can take.\n initial_bias: Value used to initialize the bias vector. May be an\n initializer instance or another value except ``None`` that\n :func:`~chainer.init_weight` helper function can take. 
If ``None``\n is given, this link does not use the bias vector.\n use_cudnn (bool): If ``True``, then this link uses cuDNN if available.\n See :func:`~chainer.functions.convolution_nd` for exact conditions\n of cuDNN availability.\n cover_all (bool): If ``True``, all spatial locations are convoluted\n into some output pixels. It may make the output size larger.\n ``cover_all`` needs to be ``False`` if you want to use cuDNN.\n\n .. seealso::\n See :func:`~chainer.functions.convolution_nd` for the definition of\n N-dimensional convolution. See\n :func:`~chainer.functions.convolution_2d` for the definition of\n two-dimensional convolution.\n\n Attributes:\n W (~chainer.Variable): Weight parameter.\n b (~chainer.Variable): Bias parameter. If ``initial_bias`` is ``None``,\n set to ``None``.\n\n \"\"\"\n\n def __init__(self, ndim, in_channels, out_channels, ksize, stride=1, pad=0,\n initialW=None, initial_bias=None, use_cudnn=True,\n cover_all=False):\n ksize = conv_nd.as_tuple(ksize, ndim)\n self.stride = stride\n self.pad = pad\n self.use_cudnn = use_cudnn\n self.cover_all = cover_all\n\n super(ConvolutionND, self).__init__()\n\n W_shape = (out_channels, in_channels) + ksize\n initialW = initializers._get_initializer(initialW)\n self.add_param('W', W_shape, initializer=initialW)\n\n if initial_bias is None:\n self.b = None\n else:\n initial_bias = initializers._get_initializer(initial_bias)\n self.add_param('b', out_channels, initializer=initial_bias)\n\n def __call__(self, x):\n \"\"\"Applies N-dimensional convolution layer.\n\n Args:\n x (~chainer.Variable): Input image.\n\n Returns:\n ~chainer.Variable: Output of convolution.\n\n \"\"\"\n return convolution_nd.convolution_nd(\n x, self.W, self.b, self.stride, self.pad,\n use_cudnn=self.use_cudnn, cover_all=self.cover_all)\n", "path": "chainer/links/connection/convolution_nd.py"}]} | 1,358 | 548 |