problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.71k-18.9k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 465-23.6k) | num_tokens_prompt (int64 556-4.1k) | num_tokens_diff (int64 47-1.02k)
---|---|---|---|---|---|---|---|---|
gh_patches_debug_32067 | rasdani/github-patches | git_diff | blaze__blaze-1031 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spark DataFrame Join should use the native implementation
Right now we do something like this
``` python
@dispatch(Join, SparkDataFrame, SparkDataFrame)
def compute_up(expr, lhs, rhs, **kwargs):
return compute_up(expr, lhs.rdd, rhs.rdd, **kwargs)
```
Which doesn't use the native Spark DataFrame join implementation. I don't think it would be that difficult to use the native implementation. We just have to be careful about how we deal with post-join column selection. The code that does this for plain RDDs can probably be abstracted into a separate function. It may already exist in that form as well.
</issue>
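For intuition about what the requested change involves, here is a minimal sketch of a native `pyspark.sql.DataFrame` join with post-join column selection, assuming the standard `DataFrame.join`/`select` API; the helper name, its parameters, and the join-type mapping are illustrative rather than the project's actual code:

```python
# Illustrative sketch: join two pyspark.sql.DataFrames natively instead of
# dropping down to their underlying RDDs.
from functools import reduce
from operator import and_

def native_df_join(lhs, rhs, on_left, on_right, how, fields):
    # Build the join condition column by column: lhs.a == rhs.a AND lhs.b == rhs.b ...
    cond = reduce(and_, [lhs[l] == rhs[r] for l, r in zip(on_left, on_right)])
    # Spark spells the outer joins "left_outer" / "right_outer" / "outer".
    spark_how = {"left": "left_outer", "right": "right_outer"}.get(how, how)
    joined = lhs.join(rhs, cond, spark_how)
    # Post-join column selection: take each requested field from the side that has
    # it, preferring the right side for right joins so join keys survive null rows.
    prefer, other = (rhs, lhs) if how == "right" else (lhs, rhs)
    cols = [prefer[f] if f in prefer.columns else other[f] for f in fields]
    return joined.select(*cols)
```

This mirrors the column-selection concern raised in the issue: after the join, only the fields of the result schema are kept, and each is taken from exactly one side.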
<code>
[start of blaze/compute/sparksql.py]
1 """SparkSQL backend for blaze.
2
3 Notes
4 -----
5 Translation happens via the Hive sqlalchemy dialect, which is then sent to
6 SparkSQL.
7 """
8
9 from __future__ import absolute_import, division, print_function
10
11 from toolz import pipe
12 from toolz.curried import filter, map
13
14 from ..dispatch import dispatch
15 from ..expr import Expr, symbol
16 from .core import compute
17 from .utils import literalquery, istable, make_sqlalchemy_table
18 from .spark import Dummy
19
20 __all__ = []
21
22
23 try:
24 from pyspark import SQLContext
25 from pyhive.sqlalchemy_hive import HiveDialect
26 except ImportError:
27 SQLContext = Dummy
28
29
30 @dispatch(Expr, SQLContext)
31 def compute_down(expr, data, **kwargs):
32 """ Compile a blaze expression to a sparksql expression"""
33 leaves = expr._leaves()
34
35 # make sure we only have a single leaf node
36 if len(leaves) != 1:
37 raise ValueError('Must compile from exactly one root database')
38
39 leaf, = leaves
40
41 # field expressions on the database are Field instances with a record
42 # measure whose immediate child is the database leaf
43 tables = pipe(expr._subterms(), filter(istable(leaf)), list)
44
45 # raise if we don't have tables in our database
46 if not tables:
47 raise ValueError('Expressions not referencing a table cannot be '
48 'compiled')
49
50 # make new symbols for each table
51 new_leaves = [symbol(t._name, t.dshape) for t in tables]
52
53 # sub them in the expression
54 expr = expr._subs(dict(zip(tables, new_leaves)))
55
56 # compute using sqlalchemy
57 scope = dict(zip(new_leaves, map(make_sqlalchemy_table, tables)))
58 query = compute(expr, scope)
59
60 # interpolate params
61 compiled = literalquery(query, dialect=HiveDialect())
62 return data.sql(str(compiled))
63
[end of blaze/compute/sparksql.py]
[start of blaze/compute/spark.py]
1 from __future__ import absolute_import, division, print_function
2
3 from toolz import compose, identity
4 from datashape.predicates import isscalar
5
6 from ..expr import (Expr, ElemWise, Selection, Sort, Apply, Distinct, Join,
7 By, Label, Summary, by, ReLabel, Like, Reduction, Head)
8 from .python import (compute, rrowfunc, rowfunc, ElemWise, pair_assemble,
9 reduce_by_funcs, binops, like_regex_predicate)
10 from ..expr.broadcast import broadcast_collect
11 from ..compatibility import builtins, unicode
12 from ..expr import reductions
13 from ..dispatch import dispatch
14
15 from .core import compute, compute_up
16
17 __all__ = ['RDD', 'pyspark', 'SparkContext']
18
19
20 class Dummy(object):
21 sum = max = min = count = distinct = mean = variance = stdev = None
22
23 try:
24 import py4j
25 from pyspark import SparkContext
26 import pyspark
27 from pyspark.rdd import RDD
28 try:
29 from pyspark.sql import DataFrame as SparkDataFrame
30 except ImportError:
31 SparkDataFrame = Dummy
32 RDD.min
33 except (AttributeError, ImportError):
34 SparkContext = Dummy
35 pyspark = Dummy()
36 pyspark.rdd = Dummy()
37 RDD = Dummy
38 SparkDataFrame = Dummy
39
40 # PySpark adds a SIGCHLD signal handler, but that breaks other packages, so we
41 # remove it
42 # See https://issues.apache.org/jira/browse/SPARK-1394
43 try:
44 import signal
45 signal.signal(signal.SIGCHLD, signal.SIG_DFL)
46 except:
47 pass
48
49
50 @dispatch(Expr, RDD)
51 def optimize(expr, seq):
52 return broadcast_collect(expr)
53
54
55 @dispatch(ElemWise, RDD)
56 def compute_up(t, rdd, **kwargs):
57 func = rowfunc(t)
58 return rdd.map(func)
59
60
61 @dispatch(Selection, RDD)
62 def compute_up(t, rdd, **kwargs):
63 predicate = optimize(t.predicate, rdd)
64 predicate = rrowfunc(predicate, t._child)
65 return rdd.filter(predicate)
66
67
68 rdd_reductions = {
69 reductions.sum: RDD.sum,
70 reductions.min: RDD.min,
71 reductions.max: RDD.max,
72 reductions.count: RDD.count,
73 reductions.mean: RDD.mean,
74 reductions.var: RDD.variance,
75 reductions.std: RDD.stdev,
76 reductions.nunique: compose(RDD.count, RDD.distinct)}
77
78
79 @dispatch(tuple(rdd_reductions), RDD)
80 def compute_up(t, rdd, **kwargs):
81 return rdd_reductions[type(t)](rdd)
82
83
84 def istruthy(x):
85 return not not x
86
87
88 @dispatch(reductions.any, RDD)
89 def compute_up(t, rdd, **kwargs):
90 return istruthy(rdd.filter(identity).take(1))
91
92
93 @dispatch(reductions.all, RDD)
94 def compute_up(t, rdd, **kwargs):
95 return not rdd.filter(lambda x: not x).take(1)
96
97
98 @dispatch(Head, RDD)
99 def compute_up(t, rdd, **kwargs):
100 return rdd.take(t.n)
101
102
103 @dispatch(Apply, RDD)
104 def compute_up(t, rdd, **kwargs):
105 if t._splittable:
106 return rdd.mapPartitions(t.func)
107 else:
108 raise NotImplementedError("Can only apply splittable functions."
109 "To apply function to each partition add "
110 "splittable=True kwarg to call to apply. "
111 "t.apply(func, dshape, splittable=True)")
112
113
114 @dispatch(Sort, RDD)
115 def compute_up(t, rdd, **kwargs):
116 if isinstance(t.key, (str, unicode, tuple, list)):
117 key = rowfunc(t._child[t.key])
118 else:
119 key = optimize(t.key, rdd)
120 key = rrowfunc(key, t._child)
121 return (rdd.keyBy(key)
122 .sortByKey(ascending=t.ascending)
123 .map(lambda x: x[1]))
124
125
126 @dispatch(Distinct, RDD)
127 def compute_up(t, rdd, **kwargs):
128 return rdd.distinct()
129
130
131 def jgetattr(data, attr, default=None):
132 """Spark's API doesn't properly implement the ``getattr`` interface, so
133 we work around it.
134 """
135 try:
136 return getattr(data, attr, default)
137 except py4j.protocol.Py4JJavaError:
138 return default
139
140
141 @compute_up.register(Join, SparkDataFrame, SparkDataFrame)
142 def spark_df_join(t, lhs, rhs, **kwargs):
143 # ship to rdd land, so we can reuse handling of combining records code
144 rdd = compute_up(t, lhs.rdd, rhs.rdd, **kwargs)
145 return lhs.sql_ctx.createDataFrame(rdd)
146
147
148 @compute_up.register(Join, RDD, RDD)
149 def spark_join(t, lhs, rhs, **kwargs):
150 on_left = rowfunc(t.lhs[t.on_left])
151 on_right = rowfunc(t.rhs[t.on_right])
152
153 lhs = lhs.keyBy(on_left)
154 rhs = rhs.keyBy(on_right)
155
156 how = t.how
157
158 if how == 'inner':
159 joiner = lhs.join
160 elif how == 'left':
161 joiner = lhs.leftOuterJoin
162 elif how == 'right':
163 joiner = lhs.rightOuterJoin
164 elif how == 'outer':
165 joiner = lhs.fullOuterJoin
166 else:
167 raise ValueError("Invalid join type %r, must be one of "
168 "{'inner', 'left', 'right', 'outer'}" % how)
169
170 rdd = joiner(rhs)
171 assemble = pair_assemble(t)
172
173 return rdd.map(lambda x: assemble(x[1]))
174
175
176 @dispatch(By, RDD)
177 def compute_up(t, rdd, **kwargs):
178 grouper = optimize(t.grouper, rdd)
179 apply = optimize(t.apply, rdd)
180 t = by(grouper, apply)
181 if ((isinstance(t.apply, Reduction) and type(t.apply) in binops) or
182 (isinstance(t.apply, Summary) and
183 builtins.all(type(val) in binops for val in t.apply.values))):
184 grouper, binop, combiner, initial = reduce_by_funcs(t)
185
186 if isscalar(t.grouper.dshape.measure):
187 keyfunc = lambda x: (x,)
188 else:
189 keyfunc = identity
190 if isscalar(t.apply.dshape.measure):
191 valfunc = lambda x: (x,)
192 else:
193 valfunc = identity
194 unpack = lambda kv: keyfunc(kv[0]) + valfunc(kv[1])
195
196 create = lambda v: binop(initial, v)
197
198 return (rdd.keyBy(grouper)
199 .combineByKey(create, binop, combiner)
200 .map(unpack))
201 else:
202 raise NotImplementedError("By only implemented for common reductions."
203 "\nGot %s" % type(t.apply))
204
205
206 @dispatch((Label, ReLabel), RDD)
207 def compute_up(t, rdd, **kwargs):
208 return rdd
209
210
211 @dispatch(Summary, RDD)
212 def compute_up(t, rdd, **kwargs):
213 rdd = rdd.cache()
214 return tuple(compute(value, {t._child: rdd}) for value in t.values)
215
216
217 @dispatch(Like, RDD)
218 def compute_up(t, rdd, **kwargs):
219 predicate = like_regex_predicate(t)
220 return rdd.filter(predicate)
221
[end of blaze/compute/spark.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/blaze/compute/spark.py b/blaze/compute/spark.py
--- a/blaze/compute/spark.py
+++ b/blaze/compute/spark.py
@@ -138,13 +138,6 @@
return default
-@compute_up.register(Join, SparkDataFrame, SparkDataFrame)
-def spark_df_join(t, lhs, rhs, **kwargs):
- # ship to rdd land, so we can reuse handling of combining records code
- rdd = compute_up(t, lhs.rdd, rhs.rdd, **kwargs)
- return lhs.sql_ctx.createDataFrame(rdd)
-
-
@compute_up.register(Join, RDD, RDD)
def spark_join(t, lhs, rhs, **kwargs):
on_left = rowfunc(t.lhs[t.on_left])
diff --git a/blaze/compute/sparksql.py b/blaze/compute/sparksql.py
--- a/blaze/compute/sparksql.py
+++ b/blaze/compute/sparksql.py
@@ -8,14 +8,17 @@
from __future__ import absolute_import, division, print_function
+from operator import and_
+
from toolz import pipe
from toolz.curried import filter, map
from ..dispatch import dispatch
-from ..expr import Expr, symbol
+from ..expr import Expr, symbol, Join
from .core import compute
from .utils import literalquery, istable, make_sqlalchemy_table
-from .spark import Dummy
+from ..utils import listpack
+from .spark import Dummy, jgetattr
__all__ = []
@@ -26,6 +29,30 @@
except ImportError:
SQLContext = Dummy
+try:
+ from pyspark.sql import DataFrame as SparkDataFrame
+except ImportError:
+ SparkDataFrame = Dummy
+
+
+join_types = {
+ 'left': 'left_outer',
+ 'right': 'right_outer'
+}
+
+
+@dispatch(Join, SparkDataFrame, SparkDataFrame)
+def compute_up(t, lhs, rhs, **kwargs):
+ ands = [getattr(lhs, left) == getattr(rhs, right)
+ for left, right in zip(*map(listpack, (t.on_left, t.on_right)))]
+
+ joined = lhs.join(rhs, reduce(and_, ands), join_types.get(t.how, t.how))
+
+ prec, sec = (rhs, lhs) if t.how == 'right' else (lhs, rhs)
+ cols = [jgetattr(prec, f, jgetattr(sec, f, None)) for f in t.fields]
+ assert all(c is not None for c in cols)
+ return joined.select(*cols)
+
@dispatch(Expr, SQLContext)
def compute_down(expr, data, **kwargs):
| {"golden_diff": "diff --git a/blaze/compute/spark.py b/blaze/compute/spark.py\n--- a/blaze/compute/spark.py\n+++ b/blaze/compute/spark.py\n@@ -138,13 +138,6 @@\n return default\n \n \n-@compute_up.register(Join, SparkDataFrame, SparkDataFrame)\n-def spark_df_join(t, lhs, rhs, **kwargs):\n- # ship to rdd land, so we can reuse handling of combining records code\n- rdd = compute_up(t, lhs.rdd, rhs.rdd, **kwargs)\n- return lhs.sql_ctx.createDataFrame(rdd)\n-\n-\n @compute_up.register(Join, RDD, RDD)\n def spark_join(t, lhs, rhs, **kwargs):\n on_left = rowfunc(t.lhs[t.on_left])\ndiff --git a/blaze/compute/sparksql.py b/blaze/compute/sparksql.py\n--- a/blaze/compute/sparksql.py\n+++ b/blaze/compute/sparksql.py\n@@ -8,14 +8,17 @@\n \n from __future__ import absolute_import, division, print_function\n \n+from operator import and_\n+\n from toolz import pipe\n from toolz.curried import filter, map\n \n from ..dispatch import dispatch\n-from ..expr import Expr, symbol\n+from ..expr import Expr, symbol, Join\n from .core import compute\n from .utils import literalquery, istable, make_sqlalchemy_table\n-from .spark import Dummy\n+from ..utils import listpack\n+from .spark import Dummy, jgetattr\n \n __all__ = []\n \n@@ -26,6 +29,30 @@\n except ImportError:\n SQLContext = Dummy\n \n+try:\n+ from pyspark.sql import DataFrame as SparkDataFrame\n+except ImportError:\n+ SparkDataFrame = Dummy\n+\n+\n+join_types = {\n+ 'left': 'left_outer',\n+ 'right': 'right_outer'\n+}\n+\n+\n+@dispatch(Join, SparkDataFrame, SparkDataFrame)\n+def compute_up(t, lhs, rhs, **kwargs):\n+ ands = [getattr(lhs, left) == getattr(rhs, right)\n+ for left, right in zip(*map(listpack, (t.on_left, t.on_right)))]\n+\n+ joined = lhs.join(rhs, reduce(and_, ands), join_types.get(t.how, t.how))\n+\n+ prec, sec = (rhs, lhs) if t.how == 'right' else (lhs, rhs)\n+ cols = [jgetattr(prec, f, jgetattr(sec, f, None)) for f in t.fields]\n+ assert all(c is not None for c in cols)\n+ return joined.select(*cols)\n+\n \n @dispatch(Expr, SQLContext)\n def compute_down(expr, data, **kwargs):\n", "issue": "Spark DataFrame Join should use the native implementation\nRight now we do something like this\n\n``` python\n\n@dispatch(Join, SparkDataFrame, SparkDataFrame)\ndef compute_up(expr, lhs, rhs, **kwargs):\n return compute_up(expr, lhs.rdd, rhs.rdd, **kwargs)\n```\n\nWhich doesn't use the native Spark DataFrame join implementation. I don't think it would be that difficult to use the native implementation. We just have to be careful about how we deal with post-join column selection. The code that does this for plain RDDs can probably be abstracted into a separate function. 
It may already exist in that form as well.\n\n", "before_files": [{"content": "\"\"\"SparkSQL backend for blaze.\n\nNotes\n-----\nTranslation happens via the Hive sqlalchemy dialect, which is then sent to\nSparkSQL.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom toolz import pipe\nfrom toolz.curried import filter, map\n\nfrom ..dispatch import dispatch\nfrom ..expr import Expr, symbol\nfrom .core import compute\nfrom .utils import literalquery, istable, make_sqlalchemy_table\nfrom .spark import Dummy\n\n__all__ = []\n\n\ntry:\n from pyspark import SQLContext\n from pyhive.sqlalchemy_hive import HiveDialect\nexcept ImportError:\n SQLContext = Dummy\n\n\n@dispatch(Expr, SQLContext)\ndef compute_down(expr, data, **kwargs):\n \"\"\" Compile a blaze expression to a sparksql expression\"\"\"\n leaves = expr._leaves()\n\n # make sure we only have a single leaf node\n if len(leaves) != 1:\n raise ValueError('Must compile from exactly one root database')\n\n leaf, = leaves\n\n # field expressions on the database are Field instances with a record\n # measure whose immediate child is the database leaf\n tables = pipe(expr._subterms(), filter(istable(leaf)), list)\n\n # raise if we don't have tables in our database\n if not tables:\n raise ValueError('Expressions not referencing a table cannot be '\n 'compiled')\n\n # make new symbols for each table\n new_leaves = [symbol(t._name, t.dshape) for t in tables]\n\n # sub them in the expression\n expr = expr._subs(dict(zip(tables, new_leaves)))\n\n # compute using sqlalchemy\n scope = dict(zip(new_leaves, map(make_sqlalchemy_table, tables)))\n query = compute(expr, scope)\n\n # interpolate params\n compiled = literalquery(query, dialect=HiveDialect())\n return data.sql(str(compiled))\n", "path": "blaze/compute/sparksql.py"}, {"content": "from __future__ import absolute_import, division, print_function\n\nfrom toolz import compose, identity\nfrom datashape.predicates import isscalar\n\nfrom ..expr import (Expr, ElemWise, Selection, Sort, Apply, Distinct, Join,\n By, Label, Summary, by, ReLabel, Like, Reduction, Head)\nfrom .python import (compute, rrowfunc, rowfunc, ElemWise, pair_assemble,\n reduce_by_funcs, binops, like_regex_predicate)\nfrom ..expr.broadcast import broadcast_collect\nfrom ..compatibility import builtins, unicode\nfrom ..expr import reductions\nfrom ..dispatch import dispatch\n\nfrom .core import compute, compute_up\n\n__all__ = ['RDD', 'pyspark', 'SparkContext']\n\n\nclass Dummy(object):\n sum = max = min = count = distinct = mean = variance = stdev = None\n\ntry:\n import py4j\n from pyspark import SparkContext\n import pyspark\n from pyspark.rdd import RDD\n try:\n from pyspark.sql import DataFrame as SparkDataFrame\n except ImportError:\n SparkDataFrame = Dummy\n RDD.min\nexcept (AttributeError, ImportError):\n SparkContext = Dummy\n pyspark = Dummy()\n pyspark.rdd = Dummy()\n RDD = Dummy\n SparkDataFrame = Dummy\n\n# PySpark adds a SIGCHLD signal handler, but that breaks other packages, so we\n# remove it\n# See https://issues.apache.org/jira/browse/SPARK-1394\ntry:\n import signal\n signal.signal(signal.SIGCHLD, signal.SIG_DFL)\nexcept:\n pass\n\n\n@dispatch(Expr, RDD)\ndef optimize(expr, seq):\n return broadcast_collect(expr)\n\n\n@dispatch(ElemWise, RDD)\ndef compute_up(t, rdd, **kwargs):\n func = rowfunc(t)\n return rdd.map(func)\n\n\n@dispatch(Selection, RDD)\ndef compute_up(t, rdd, **kwargs):\n predicate = optimize(t.predicate, rdd)\n predicate = rrowfunc(predicate, t._child)\n return 
rdd.filter(predicate)\n\n\nrdd_reductions = {\n reductions.sum: RDD.sum,\n reductions.min: RDD.min,\n reductions.max: RDD.max,\n reductions.count: RDD.count,\n reductions.mean: RDD.mean,\n reductions.var: RDD.variance,\n reductions.std: RDD.stdev,\n reductions.nunique: compose(RDD.count, RDD.distinct)}\n\n\n@dispatch(tuple(rdd_reductions), RDD)\ndef compute_up(t, rdd, **kwargs):\n return rdd_reductions[type(t)](rdd)\n\n\ndef istruthy(x):\n return not not x\n\n\n@dispatch(reductions.any, RDD)\ndef compute_up(t, rdd, **kwargs):\n return istruthy(rdd.filter(identity).take(1))\n\n\n@dispatch(reductions.all, RDD)\ndef compute_up(t, rdd, **kwargs):\n return not rdd.filter(lambda x: not x).take(1)\n\n\n@dispatch(Head, RDD)\ndef compute_up(t, rdd, **kwargs):\n return rdd.take(t.n)\n\n\n@dispatch(Apply, RDD)\ndef compute_up(t, rdd, **kwargs):\n if t._splittable:\n return rdd.mapPartitions(t.func)\n else:\n raise NotImplementedError(\"Can only apply splittable functions.\"\n \"To apply function to each partition add \"\n \"splittable=True kwarg to call to apply. \"\n \"t.apply(func, dshape, splittable=True)\")\n\n\n@dispatch(Sort, RDD)\ndef compute_up(t, rdd, **kwargs):\n if isinstance(t.key, (str, unicode, tuple, list)):\n key = rowfunc(t._child[t.key])\n else:\n key = optimize(t.key, rdd)\n key = rrowfunc(key, t._child)\n return (rdd.keyBy(key)\n .sortByKey(ascending=t.ascending)\n .map(lambda x: x[1]))\n\n\n@dispatch(Distinct, RDD)\ndef compute_up(t, rdd, **kwargs):\n return rdd.distinct()\n\n\ndef jgetattr(data, attr, default=None):\n \"\"\"Spark's API doesn't properly implement the ``getattr`` interface, so\n we work around it.\n \"\"\"\n try:\n return getattr(data, attr, default)\n except py4j.protocol.Py4JJavaError:\n return default\n\n\n@compute_up.register(Join, SparkDataFrame, SparkDataFrame)\ndef spark_df_join(t, lhs, rhs, **kwargs):\n # ship to rdd land, so we can reuse handling of combining records code\n rdd = compute_up(t, lhs.rdd, rhs.rdd, **kwargs)\n return lhs.sql_ctx.createDataFrame(rdd)\n\n\n@compute_up.register(Join, RDD, RDD)\ndef spark_join(t, lhs, rhs, **kwargs):\n on_left = rowfunc(t.lhs[t.on_left])\n on_right = rowfunc(t.rhs[t.on_right])\n\n lhs = lhs.keyBy(on_left)\n rhs = rhs.keyBy(on_right)\n\n how = t.how\n\n if how == 'inner':\n joiner = lhs.join\n elif how == 'left':\n joiner = lhs.leftOuterJoin\n elif how == 'right':\n joiner = lhs.rightOuterJoin\n elif how == 'outer':\n joiner = lhs.fullOuterJoin\n else:\n raise ValueError(\"Invalid join type %r, must be one of \"\n \"{'inner', 'left', 'right', 'outer'}\" % how)\n\n rdd = joiner(rhs)\n assemble = pair_assemble(t)\n\n return rdd.map(lambda x: assemble(x[1]))\n\n\n@dispatch(By, RDD)\ndef compute_up(t, rdd, **kwargs):\n grouper = optimize(t.grouper, rdd)\n apply = optimize(t.apply, rdd)\n t = by(grouper, apply)\n if ((isinstance(t.apply, Reduction) and type(t.apply) in binops) or\n (isinstance(t.apply, Summary) and\n builtins.all(type(val) in binops for val in t.apply.values))):\n grouper, binop, combiner, initial = reduce_by_funcs(t)\n\n if isscalar(t.grouper.dshape.measure):\n keyfunc = lambda x: (x,)\n else:\n keyfunc = identity\n if isscalar(t.apply.dshape.measure):\n valfunc = lambda x: (x,)\n else:\n valfunc = identity\n unpack = lambda kv: keyfunc(kv[0]) + valfunc(kv[1])\n\n create = lambda v: binop(initial, v)\n\n return (rdd.keyBy(grouper)\n .combineByKey(create, binop, combiner)\n .map(unpack))\n else:\n raise NotImplementedError(\"By only implemented for common reductions.\"\n \"\\nGot %s\" % 
type(t.apply))\n\n\n@dispatch((Label, ReLabel), RDD)\ndef compute_up(t, rdd, **kwargs):\n return rdd\n\n\n@dispatch(Summary, RDD)\ndef compute_up(t, rdd, **kwargs):\n rdd = rdd.cache()\n return tuple(compute(value, {t._child: rdd}) for value in t.values)\n\n\n@dispatch(Like, RDD)\ndef compute_up(t, rdd, **kwargs):\n predicate = like_regex_predicate(t)\n return rdd.filter(predicate)\n", "path": "blaze/compute/spark.py"}]} | 3,421 | 605 |
gh_patches_debug_14366 | rasdani/github-patches | git_diff | conan-io__conan-13211 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[bug] Conan build command does not support conanfile.txt as described
### Description
The documentation about [build](https://docs.conan.io/2/reference/commands/build.html) command says:
```
usage: conan build [-h] [-v [V]] [--logger] [--name NAME] [--version VERSION] [--user USER] [--channel CHANNEL] [-of OUTPUT_FOLDER] [-b BUILD] [-r REMOTE | -nr] [-u] [-o OPTIONS_HOST] [-o:b OPTIONS_BUILD] [-o:h OPTIONS_HOST] [-pr PROFILE_HOST] [-pr:b PROFILE_BUILD]
[-pr:h PROFILE_HOST] [-s SETTINGS_HOST] [-s:b SETTINGS_BUILD] [-s:h SETTINGS_HOST] [-c CONF_HOST] [-c:b CONF_BUILD] [-c:h CONF_HOST] [-l LOCKFILE] [--lockfile-partial] [--lockfile-out LOCKFILE_OUT] [--lockfile-packages] [--lockfile-clean]
[path]
Install dependencies and call the build() method.
positional arguments:
path Path to a folder containing a recipe (conanfile.py or conanfile.txt) or to a recipe file. e.g., ./my_project/conanfile.txt.
```
However, `conanfile.txt` is not accepted by the build command.
Since the documentation is generated from the command's output, the help text should be fixed in the Conan client first.
### Environment details
* Operating System+version: OSX 13
* Compiler+version: Apple-Clang 14
* Conan version: 2.0.0
* Python version: 3.10
### Steps to reproduce
1. mkdir /tmp/foo && cd /tmp/foo
2. echo "[requires]\nzlib/1.2.13" > conanfile.txt
3. conan build .
4. Or, conan build ./conanfile.txt
### Logs
```
% conan build .
ERROR: Conanfile not found at /private/tmp/foo/conanfile.py
% conan build ./conanfile.txt
ERROR: A conanfile.py is needed, /private/tmp/conantxt/conanfile.txt is not acceptable
```
</issue>
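As a concrete illustration, the eventual fix is a one-line wording change to the positional argument's help text; the sketch below paraphrases the corrected declaration using the wording of the patch shown later in this record, not any new behavior:

```python
# Sketch of the clarified help text for the positional "path" argument,
# matching the wording introduced by the patch later in this record.
parser.add_argument(
    "path", nargs="?",
    help="Path to a python-based recipe file or a folder "
         "containing a conanfile.py recipe. conanfile.txt "
         "cannot be used with conan build.",
)
```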
<code>
[start of conan/cli/commands/build.py]
1 import os
2
3 from conan.api.output import ConanOutput
4 from conan.cli.command import conan_command
5 from conan.cli.commands import make_abs_path
6 from conan.cli.args import add_lockfile_args, add_common_install_arguments, add_reference_args
7 from conan.internal.conan_app import ConanApp
8 from conan.cli.printers.graph import print_graph_packages, print_graph_basic
9 from conans.client.conanfile.build import run_build_method
10
11
12 @conan_command(group='Creator')
13 def build(conan_api, parser, *args):
14 """
15 Install dependencies and call the build() method.
16 """
17 parser.add_argument("path", nargs="?",
18 help="Path to a folder containing a recipe (conanfile.py "
19 "or conanfile.txt) or to a recipe file. e.g., "
20 "./my_project/conanfile.txt.")
21 add_reference_args(parser)
22 # TODO: Missing --build-require argument and management
23 parser.add_argument("-of", "--output-folder",
24 help='The root output folder for generated and build files')
25 add_common_install_arguments(parser)
26 add_lockfile_args(parser)
27 args = parser.parse_args(*args)
28
29 cwd = os.getcwd()
30 path = conan_api.local.get_conanfile_path(args.path, cwd, py=True)
31 folder = os.path.dirname(path)
32 remotes = conan_api.remotes.list(args.remote) if not args.no_remote else []
33
34 lockfile = conan_api.lockfile.get_lockfile(lockfile=args.lockfile,
35 conanfile_path=path,
36 cwd=cwd,
37 partial=args.lockfile_partial)
38 profile_host, profile_build = conan_api.profiles.get_profiles_from_args(args)
39
40 deps_graph = conan_api.graph.load_graph_consumer(path, args.name, args.version,
41 args.user, args.channel,
42 profile_host, profile_build, lockfile, remotes,
43 args.update)
44 print_graph_basic(deps_graph)
45 deps_graph.report_graph_error()
46 conan_api.graph.analyze_binaries(deps_graph, args.build, remotes=remotes, update=args.update,
47 lockfile=lockfile)
48 print_graph_packages(deps_graph)
49
50 out = ConanOutput()
51 out.title("Installing packages")
52 conan_api.install.install_binaries(deps_graph=deps_graph, remotes=remotes)
53
54 source_folder = folder
55 output_folder = make_abs_path(args.output_folder, cwd) if args.output_folder else None
56 out.title("Finalizing install (deploy, generators)")
57 conan_api.install.install_consumer(deps_graph=deps_graph, source_folder=source_folder,
58 output_folder=output_folder)
59
60 # TODO: Decide API to put this
61 app = ConanApp(conan_api.cache_folder)
62 conanfile = deps_graph.root.conanfile
63 conanfile.folders.set_base_package(conanfile.folders.base_build)
64 run_build_method(conanfile, app.hook_manager)
65
66 lockfile = conan_api.lockfile.update_lockfile(lockfile, deps_graph, args.lockfile_packages,
67 clean=args.lockfile_clean)
68 conan_api.lockfile.save_lockfile(lockfile, args.lockfile_out, source_folder)
69 return deps_graph
70
[end of conan/cli/commands/build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conan/cli/commands/build.py b/conan/cli/commands/build.py
--- a/conan/cli/commands/build.py
+++ b/conan/cli/commands/build.py
@@ -15,9 +15,9 @@
Install dependencies and call the build() method.
"""
parser.add_argument("path", nargs="?",
- help="Path to a folder containing a recipe (conanfile.py "
- "or conanfile.txt) or to a recipe file. e.g., "
- "./my_project/conanfile.txt.")
+ help='Path to a python-based recipe file or a folder '
+ 'containing a conanfile.py recipe. conanfile.txt '
+ 'cannot be used with conan build.')
add_reference_args(parser)
# TODO: Missing --build-require argument and management
parser.add_argument("-of", "--output-folder",
| {"golden_diff": "diff --git a/conan/cli/commands/build.py b/conan/cli/commands/build.py\n--- a/conan/cli/commands/build.py\n+++ b/conan/cli/commands/build.py\n@@ -15,9 +15,9 @@\n Install dependencies and call the build() method.\n \"\"\"\n parser.add_argument(\"path\", nargs=\"?\",\n- help=\"Path to a folder containing a recipe (conanfile.py \"\n- \"or conanfile.txt) or to a recipe file. e.g., \"\n- \"./my_project/conanfile.txt.\")\n+ help='Path to a python-based recipe file or a folder '\n+ 'containing a conanfile.py recipe. conanfile.txt '\n+ 'cannot be used with conan build.')\n add_reference_args(parser)\n # TODO: Missing --build-require argument and management\n parser.add_argument(\"-of\", \"--output-folder\",\n", "issue": "[bug] Conan build command does not support conanfile.txt as described\n### Description\r\n\r\nThe documentation about [build](https://docs.conan.io/2/reference/commands/build.html) command says:\r\n\r\n```\r\nusage: conan build [-h] [-v [V]] [--logger] [--name NAME] [--version VERSION] [--user USER] [--channel CHANNEL] [-of OUTPUT_FOLDER] [-b BUILD] [-r REMOTE | -nr] [-u] [-o OPTIONS_HOST] [-o:b OPTIONS_BUILD] [-o:h OPTIONS_HOST] [-pr PROFILE_HOST] [-pr:b PROFILE_BUILD]\r\n [-pr:h PROFILE_HOST] [-s SETTINGS_HOST] [-s:b SETTINGS_BUILD] [-s:h SETTINGS_HOST] [-c CONF_HOST] [-c:b CONF_BUILD] [-c:h CONF_HOST] [-l LOCKFILE] [--lockfile-partial] [--lockfile-out LOCKFILE_OUT] [--lockfile-packages] [--lockfile-clean]\r\n [path]\r\n\r\nInstall dependencies and call the build() method.\r\n\r\npositional arguments:\r\n path Path to a folder containing a recipe (conanfile.py or conanfile.txt) or to a recipe file. e.g., ./my_project/conanfile.txt.\r\n```\r\n\r\nHowever, `conanfile.txt` is not acceptable by build command.\r\n\r\nAs the documentation is extracted from the command output, it should be fixed on Conan client first.\r\n\r\n\r\n### Environment details\r\n\r\n* Operating System+version: OSX 13\r\n* Compiler+version: Apple-Clang 14\r\n* Conan version: 2.0.0\r\n* Python version: 3.10\r\n\r\n\r\n### Steps to reproduce\r\n\r\n1. mkdir /tmp/foo && cd /tmp/foo\r\n2. echo \"[requires]\\nzlib/1.2.13\" > conanfile.txt\r\n3. conan build .\r\n4. Or, conan build ./conanfile.txt\r\n\r\n### Logs\r\n\r\n```\r\n% conan build .\r\nERROR: Conanfile not found at /private/tmp/foo/conanfile.py\r\n\r\n% conan build ./conanfile.txt \r\nERROR: A conanfile.py is needed, /private/tmp/conantxt/conanfile.txt is not acceptable\r\n```\n", "before_files": [{"content": "import os\n\nfrom conan.api.output import ConanOutput\nfrom conan.cli.command import conan_command\nfrom conan.cli.commands import make_abs_path\nfrom conan.cli.args import add_lockfile_args, add_common_install_arguments, add_reference_args\nfrom conan.internal.conan_app import ConanApp\nfrom conan.cli.printers.graph import print_graph_packages, print_graph_basic\nfrom conans.client.conanfile.build import run_build_method\n\n\n@conan_command(group='Creator')\ndef build(conan_api, parser, *args):\n \"\"\"\n Install dependencies and call the build() method.\n \"\"\"\n parser.add_argument(\"path\", nargs=\"?\",\n help=\"Path to a folder containing a recipe (conanfile.py \"\n \"or conanfile.txt) or to a recipe file. 
e.g., \"\n \"./my_project/conanfile.txt.\")\n add_reference_args(parser)\n # TODO: Missing --build-require argument and management\n parser.add_argument(\"-of\", \"--output-folder\",\n help='The root output folder for generated and build files')\n add_common_install_arguments(parser)\n add_lockfile_args(parser)\n args = parser.parse_args(*args)\n\n cwd = os.getcwd()\n path = conan_api.local.get_conanfile_path(args.path, cwd, py=True)\n folder = os.path.dirname(path)\n remotes = conan_api.remotes.list(args.remote) if not args.no_remote else []\n\n lockfile = conan_api.lockfile.get_lockfile(lockfile=args.lockfile,\n conanfile_path=path,\n cwd=cwd,\n partial=args.lockfile_partial)\n profile_host, profile_build = conan_api.profiles.get_profiles_from_args(args)\n\n deps_graph = conan_api.graph.load_graph_consumer(path, args.name, args.version,\n args.user, args.channel,\n profile_host, profile_build, lockfile, remotes,\n args.update)\n print_graph_basic(deps_graph)\n deps_graph.report_graph_error()\n conan_api.graph.analyze_binaries(deps_graph, args.build, remotes=remotes, update=args.update,\n lockfile=lockfile)\n print_graph_packages(deps_graph)\n\n out = ConanOutput()\n out.title(\"Installing packages\")\n conan_api.install.install_binaries(deps_graph=deps_graph, remotes=remotes)\n\n source_folder = folder\n output_folder = make_abs_path(args.output_folder, cwd) if args.output_folder else None\n out.title(\"Finalizing install (deploy, generators)\")\n conan_api.install.install_consumer(deps_graph=deps_graph, source_folder=source_folder,\n output_folder=output_folder)\n\n # TODO: Decide API to put this\n app = ConanApp(conan_api.cache_folder)\n conanfile = deps_graph.root.conanfile\n conanfile.folders.set_base_package(conanfile.folders.base_build)\n run_build_method(conanfile, app.hook_manager)\n\n lockfile = conan_api.lockfile.update_lockfile(lockfile, deps_graph, args.lockfile_packages,\n clean=args.lockfile_clean)\n conan_api.lockfile.save_lockfile(lockfile, args.lockfile_out, source_folder)\n return deps_graph\n", "path": "conan/cli/commands/build.py"}]} | 1,803 | 193 |
gh_patches_debug_20246 | rasdani/github-patches | git_diff | PrefectHQ__prefect-3917 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using self.description when subclassing Task causes ValueError
## Description
I believe this is probably somewhat related to #1212.
When inheriting from Task, if your subclass defines a `description` attribute, the following error is produced when creating and registering a flow with cloud:
```
ValueError: Flow could not be deserialized successfully. Error was: TypeError("__init__() got an unexpected keyword argument 'description'")
```
The full traceback is at the bottom of this issue.
## Expected Behavior
The attribute would be ignored as it's not a reserved attribute for the Task [class](https://github.com/PrefectHQ/prefect/blob/4d8337f75fe9bbb3024faa3b74a8f7debbb596d0/src/prefect/core/task.py#L182).
## Reproduction
```python
from prefect import Task, Flow
class MyTask(Task):
def __init__(self):
super().__init__()
self.name = "NameOfTask"
self.description = "This is the description"
def run(self):
return "All done!"
tsk = MyTask()
flow = Flow("my-flow", tasks=[tsk])
flow.register(project_name="project_name")
```
## Workaround
Using another attribute name, e.g. `my_description`, avoids the error.
## Environment
```json
{
"config_overrides": {
"cloud": {
"agent": {
"auth_token": true
}
}
},
"env_vars": [],
"system_information": {
"platform": "Linux-5.4.0-58-generic-x86_64-with-glibc2.27",
"prefect_backend": "cloud",
"prefect_version": "0.14.1",
"python_version": "3.8.7"
}
}
```
## Full Traceback
```bash
$ /data/my-example/.venv/bin/python3.8 /data/my-example/bin/my_example.py
Result check: OK
Traceback (most recent call last):
File "/data/my-example/.venv/lib/python3.8/site-packages/prefect/client/client.py", line 799, in register
prefect.serialization.flow.FlowSchema().load(serialized_flow)
File "/data/my-example/.venv/lib/python3.8/site-packages/prefect/utilities/serialization.py", line 141, in load
return super().load(data, **kwargs)
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 727, in load
return self._do_load(
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 861, in _do_load
result = self._deserialize(
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 669, in _deserialize
value = self._call_and_store(
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 496, in _call_and_store
value = getter_func(data)
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 666, in <lambda>
getter = lambda val: field_obj.deserialize(
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/fields.py", line 356, in deserialize
output = self._deserialize(value, attr, data, **kwargs)
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/fields.py", line 611, in _deserialize
return self._load(value, data, partial=partial)
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/fields.py", line 594, in _load
valid_data = self.schema.load(value, unknown=self.unknown, partial=partial)
File "/data/my-example/.venv/lib/python3.8/site-packages/prefect/utilities/serialization.py", line 141, in load
return super().load(data, **kwargs)
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 727, in load
return self._do_load(
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 897, in _do_load
result = self._invoke_load_processors(
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 1095, in _invoke_load_processors
data = self._invoke_processors(
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 1222, in _invoke_processors
data = [processor(item, many=many, **kwargs) for item in data]
File "/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py", line 1222, in <listcomp>
data = [processor(item, many=many, **kwargs) for item in data]
File "/data/my-example/.venv/lib/python3.8/site-packages/prefect/serialization/task.py", line 58, in create_object
task = super().create_object(data) # type: ignore
File "/data/my-example/.venv/lib/python3.8/site-packages/prefect/utilities/serialization.py", line 164, in create_object
return object_class(**init_data)
File "/data/my-example/.venv/lib/python3.8/site-packages/prefect/core/task.py", line 157, in init
old_init(self, *args, **kwargs)
TypeError: __init__() got an unexpected keyword argument 'description'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/data/my-example/bin/my_example.py", line 16, in <module>
flow.register(project_name="project_name")
File "/data/my-example/.venv/lib/python3.8/site-packages/prefect/core/flow.py", line 1665, in register
registered_flow = client.register(
File "/data/my-example/.venv/lib/python3.8/site-packages/prefect/client/client.py", line 801, in register
raise ValueError(
ValueError: Flow could not be deserialized successfully. Error was: TypeError("__init__() got an unexpected keyword argument 'description'")
```
</issue>
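The workaround mentioned at the end of the issue (rename the clashing attribute) can be written out as a short sketch; it reuses the reproduction's names and only changes the attribute that collides with the serializer:

```python
from prefect import Task, Flow

class MyTask(Task):
    def __init__(self):
        # Let Task handle its own reserved metadata (e.g. name) via kwargs,
        # and keep custom attributes under names the schema does not expect.
        super().__init__(name="NameOfTask")
        self.my_description = "This is the description"  # renamed to avoid the clash

    def run(self):
        return "All done!"

flow = Flow("my-flow", tasks=[MyTask()])
```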
<code>
[start of src/prefect/serialization/task.py]
1 from typing import TYPE_CHECKING, Any, Dict
2
3 from marshmallow import fields, post_load
4
5 import prefect
6 from prefect.utilities.serialization import (
7 JSONCompatible,
8 ObjectSchema,
9 StatefulFunctionReference,
10 to_qualified_name,
11 SortedList,
12 )
13
14 if TYPE_CHECKING:
15 import prefect.engine
16 import prefect.engine.cache_validators
17 import prefect.triggers
18
19
20 class TaskMethodsMixin:
21 def get_attribute(self, obj: Any, key: str, default: Any) -> Any:
22 """
23 By default, Marshmallow attempts to index an object, then get its attributes.
24 Indexing a Task results in a new IndexTask, so for tasks we use getattr(). Otherwise
25 we use the default method.
26 """
27 if isinstance(obj, prefect.Task):
28 return getattr(obj, key, default)
29 else:
30 return super().get_attribute(obj, key, default) # type: ignore
31
32 def load_inputs(self, task: prefect.core.Task) -> Dict[str, Dict]:
33 if not isinstance(task, prefect.core.Task):
34 return self.get_attribute(task, "inputs", None)
35 inputs = {}
36 for k, v in task.inputs().items():
37 inputs[k] = dict(required=v["required"], type=str(v["type"]))
38 return inputs
39
40 def load_outputs(self, task: prefect.core.Task) -> str:
41 if not isinstance(task, prefect.core.Task):
42 return self.get_attribute(task, "outputs", None)
43 return str(task.outputs())
44
45 @post_load
46 def create_object(self, data: dict, **kwargs: Any) -> prefect.core.Task:
47 """
48 Sometimes we deserialize tasks and edges simultaneously (for example, when a
49 Flow is being deserialized), in which case we check slugs to see if we already
50 deserialized a matching task. In that case, we reload the task from a shared
51 cache.
52 """
53 slug = data.get("slug")
54 auto_generated = data.pop("auto_generated", False)
55
56 # if the slug is not in the task cache, create a task object and add it
57 if slug not in self.context.setdefault("task_cache", {}): # type: ignore
58 task = super().create_object(data) # type: ignore
59 task.auto_generated = auto_generated # type: ignore
60 self.context["task_cache"][slug] = task # type: ignore
61
62 # return the task object from the cache
63 return self.context["task_cache"][slug] # type: ignore
64
65
66 class TaskSchema(TaskMethodsMixin, ObjectSchema):
67 class Meta:
68 object_class = lambda: prefect.core.Task
69 exclude_fields = ["type", "inputs", "outputs"]
70
71 type = fields.Function(lambda task: to_qualified_name(type(task)), lambda x: x)
72 name = fields.String(allow_none=True)
73 slug = fields.String(allow_none=True)
74 description = fields.String(allow_none=True)
75 tags = SortedList(fields.String())
76 max_retries = fields.Integer(allow_none=True)
77 retry_delay = fields.TimeDelta(allow_none=True)
78 inputs = fields.Method("load_inputs", allow_none=True)
79 outputs = fields.Method("load_outputs", allow_none=True)
80 timeout = fields.Integer(allow_none=True)
81 trigger = StatefulFunctionReference(
82 valid_functions=[
83 prefect.triggers.all_finished,
84 prefect.triggers.manual_only,
85 prefect.triggers.always_run,
86 prefect.triggers.all_successful,
87 prefect.triggers.all_failed,
88 prefect.triggers.any_successful,
89 prefect.triggers.any_failed,
90 prefect.triggers.some_failed,
91 prefect.triggers.some_successful,
92 ],
93 # don't reject custom functions, just leave them as strings
94 reject_invalid=False,
95 allow_none=True,
96 )
97 skip_on_upstream_skip = fields.Boolean(allow_none=True)
98 cache_for = fields.TimeDelta(allow_none=True)
99 cache_key = fields.String(allow_none=True)
100 cache_validator = StatefulFunctionReference(
101 valid_functions=[
102 prefect.engine.cache_validators.never_use,
103 prefect.engine.cache_validators.duration_only,
104 prefect.engine.cache_validators.all_inputs,
105 prefect.engine.cache_validators.all_parameters,
106 prefect.engine.cache_validators.partial_inputs_only,
107 prefect.engine.cache_validators.partial_parameters_only,
108 ],
109 # don't reject custom functions, just leave them as strings
110 reject_invalid=False,
111 allow_none=True,
112 )
113 auto_generated = fields.Boolean(allow_none=True)
114
115
116 class ParameterSchema(TaskMethodsMixin, ObjectSchema):
117 class Meta:
118 object_class = lambda: prefect.core.parameter.Parameter # type: ignore
119 exclude_fields = ["type", "outputs", "slug"]
120
121 type = fields.Function(lambda task: to_qualified_name(type(task)), lambda x: x)
122 name = fields.String(required=True)
123 slug = fields.String(allow_none=True)
124 default = JSONCompatible(allow_none=True)
125 required = fields.Boolean(allow_none=True)
126 description = fields.String(allow_none=True)
127 tags = SortedList(fields.String())
128 outputs = fields.Method("load_outputs", allow_none=True)
129
[end of src/prefect/serialization/task.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/prefect/serialization/task.py b/src/prefect/serialization/task.py
--- a/src/prefect/serialization/task.py
+++ b/src/prefect/serialization/task.py
@@ -71,7 +71,6 @@
type = fields.Function(lambda task: to_qualified_name(type(task)), lambda x: x)
name = fields.String(allow_none=True)
slug = fields.String(allow_none=True)
- description = fields.String(allow_none=True)
tags = SortedList(fields.String())
max_retries = fields.Integer(allow_none=True)
retry_delay = fields.TimeDelta(allow_none=True)
@@ -123,6 +122,5 @@
slug = fields.String(allow_none=True)
default = JSONCompatible(allow_none=True)
required = fields.Boolean(allow_none=True)
- description = fields.String(allow_none=True)
tags = SortedList(fields.String())
outputs = fields.Method("load_outputs", allow_none=True)
| {"golden_diff": "diff --git a/src/prefect/serialization/task.py b/src/prefect/serialization/task.py\n--- a/src/prefect/serialization/task.py\n+++ b/src/prefect/serialization/task.py\n@@ -71,7 +71,6 @@\n type = fields.Function(lambda task: to_qualified_name(type(task)), lambda x: x)\n name = fields.String(allow_none=True)\n slug = fields.String(allow_none=True)\n- description = fields.String(allow_none=True)\n tags = SortedList(fields.String())\n max_retries = fields.Integer(allow_none=True)\n retry_delay = fields.TimeDelta(allow_none=True)\n@@ -123,6 +122,5 @@\n slug = fields.String(allow_none=True)\n default = JSONCompatible(allow_none=True)\n required = fields.Boolean(allow_none=True)\n- description = fields.String(allow_none=True)\n tags = SortedList(fields.String())\n outputs = fields.Method(\"load_outputs\", allow_none=True)\n", "issue": "Using self.description when subclassing Task causes ValueError\n## Description\r\nI believe this is probably somewhat related to #1212.\r\n\r\nWhen inheriting from Task, if your subclass defines a `description` attribute, the following error is produced when creating and registering a flow with cloud:\r\n\r\n```\r\nValueError: Flow could not be deserialized successfully. Error was: TypeError(\"__init__() got an unexpected keyword argument 'description'\")\r\n```\r\nThe full traceback is at the bottom of this issue.\r\n\r\n## Expected Behavior\r\n\r\nThe attribute would be ignored as it's not a reserved attribute for the Task [class](https://github.com/PrefectHQ/prefect/blob/4d8337f75fe9bbb3024faa3b74a8f7debbb596d0/src/prefect/core/task.py#L182).\r\n\r\n\r\n## Reproduction\r\n\r\n```python\r\nfrom prefect import Task, Flow\r\n\r\nclass MyTask(Task):\r\n def __init__(self):\r\n super().__init__()\r\n self.name = \"NameOfTask\"\r\n self.description = \"This is the description\"\r\n\r\n def run(self):\r\n return \"All done!\"\r\n\r\ntsk = MyTask()\r\n\r\nflow = Flow(\"my-flow\", tasks=[tsk])\r\n\r\nflow.register(project_name=\"project_name\")\r\n```\r\n\r\n## Workaround\r\n\r\nUsing another attribute name, e.g. 
`my_description`, avoids the error.\r\n\r\n## Environment\r\n```json\r\n{\r\n \"config_overrides\": {\r\n \"cloud\": {\r\n \"agent\": {\r\n \"auth_token\": true\r\n }\r\n }\r\n },\r\n \"env_vars\": [],\r\n \"system_information\": {\r\n \"platform\": \"Linux-5.4.0-58-generic-x86_64-with-glibc2.27\",\r\n \"prefect_backend\": \"cloud\",\r\n \"prefect_version\": \"0.14.1\",\r\n \"python_version\": \"3.8.7\"\r\n }\r\n}\r\n```\r\n\r\n## Full Traceback\r\n```bash\r\n$ /data/my-example/.venv/bin/python3.8 /data/my-example/bin/my_example.py\r\nResult check: OK\r\nTraceback (most recent call last):\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/prefect/client/client.py\", line 799, in register\r\n prefect.serialization.flow.FlowSchema().load(serialized_flow)\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/prefect/utilities/serialization.py\", line 141, in load\r\n return super().load(data, **kwargs)\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 727, in load\r\n return self._do_load(\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 861, in _do_load\r\n result = self._deserialize(\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 669, in _deserialize\r\n value = self._call_and_store(\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 496, in _call_and_store\r\n value = getter_func(data)\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 666, in <lambda>\r\n getter = lambda val: field_obj.deserialize(\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/fields.py\", line 356, in deserialize\r\n output = self._deserialize(value, attr, data, **kwargs)\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/fields.py\", line 611, in _deserialize\r\n return self._load(value, data, partial=partial)\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/fields.py\", line 594, in _load\r\n valid_data = self.schema.load(value, unknown=self.unknown, partial=partial)\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/prefect/utilities/serialization.py\", line 141, in load\r\n return super().load(data, **kwargs)\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 727, in load\r\n return self._do_load(\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 897, in _do_load\r\n result = self._invoke_load_processors(\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 1095, in _invoke_load_processors\r\n data = self._invoke_processors(\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 1222, in _invoke_processors\r\n data = [processor(item, many=many, **kwargs) for item in data]\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/marshmallow/schema.py\", line 1222, in <listcomp>\r\n data = [processor(item, many=many, **kwargs) for item in data]\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/prefect/serialization/task.py\", line 58, in create_object\r\n task = super().create_object(data) # type: ignore\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/prefect/utilities/serialization.py\", line 164, in create_object\r\n return object_class(**init_data)\r\n File 
\"/data/my-example/.venv/lib/python3.8/site-packages/prefect/core/task.py\", line 157, in init\r\n old_init(self, *args, **kwargs)\r\nTypeError: __init__() got an unexpected keyword argument 'description'\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"/data/my-example/bin/my_example.py\", line 16, in <module>\r\n flow.register(project_name=\"project_name\")\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/prefect/core/flow.py\", line 1665, in register\r\n registered_flow = client.register(\r\n File \"/data/my-example/.venv/lib/python3.8/site-packages/prefect/client/client.py\", line 801, in register\r\n raise ValueError(\r\nValueError: Flow could not be deserialized successfully. Error was: TypeError(\"__init__() got an unexpected keyword argument 'description'\")\r\n```\n", "before_files": [{"content": "from typing import TYPE_CHECKING, Any, Dict\n\nfrom marshmallow import fields, post_load\n\nimport prefect\nfrom prefect.utilities.serialization import (\n JSONCompatible,\n ObjectSchema,\n StatefulFunctionReference,\n to_qualified_name,\n SortedList,\n)\n\nif TYPE_CHECKING:\n import prefect.engine\n import prefect.engine.cache_validators\n import prefect.triggers\n\n\nclass TaskMethodsMixin:\n def get_attribute(self, obj: Any, key: str, default: Any) -> Any:\n \"\"\"\n By default, Marshmallow attempts to index an object, then get its attributes.\n Indexing a Task results in a new IndexTask, so for tasks we use getattr(). Otherwise\n we use the default method.\n \"\"\"\n if isinstance(obj, prefect.Task):\n return getattr(obj, key, default)\n else:\n return super().get_attribute(obj, key, default) # type: ignore\n\n def load_inputs(self, task: prefect.core.Task) -> Dict[str, Dict]:\n if not isinstance(task, prefect.core.Task):\n return self.get_attribute(task, \"inputs\", None)\n inputs = {}\n for k, v in task.inputs().items():\n inputs[k] = dict(required=v[\"required\"], type=str(v[\"type\"]))\n return inputs\n\n def load_outputs(self, task: prefect.core.Task) -> str:\n if not isinstance(task, prefect.core.Task):\n return self.get_attribute(task, \"outputs\", None)\n return str(task.outputs())\n\n @post_load\n def create_object(self, data: dict, **kwargs: Any) -> prefect.core.Task:\n \"\"\"\n Sometimes we deserialize tasks and edges simultaneously (for example, when a\n Flow is being deserialized), in which case we check slugs to see if we already\n deserialized a matching task. 
In that case, we reload the task from a shared\n cache.\n \"\"\"\n slug = data.get(\"slug\")\n auto_generated = data.pop(\"auto_generated\", False)\n\n # if the slug is not in the task cache, create a task object and add it\n if slug not in self.context.setdefault(\"task_cache\", {}): # type: ignore\n task = super().create_object(data) # type: ignore\n task.auto_generated = auto_generated # type: ignore\n self.context[\"task_cache\"][slug] = task # type: ignore\n\n # return the task object from the cache\n return self.context[\"task_cache\"][slug] # type: ignore\n\n\nclass TaskSchema(TaskMethodsMixin, ObjectSchema):\n class Meta:\n object_class = lambda: prefect.core.Task\n exclude_fields = [\"type\", \"inputs\", \"outputs\"]\n\n type = fields.Function(lambda task: to_qualified_name(type(task)), lambda x: x)\n name = fields.String(allow_none=True)\n slug = fields.String(allow_none=True)\n description = fields.String(allow_none=True)\n tags = SortedList(fields.String())\n max_retries = fields.Integer(allow_none=True)\n retry_delay = fields.TimeDelta(allow_none=True)\n inputs = fields.Method(\"load_inputs\", allow_none=True)\n outputs = fields.Method(\"load_outputs\", allow_none=True)\n timeout = fields.Integer(allow_none=True)\n trigger = StatefulFunctionReference(\n valid_functions=[\n prefect.triggers.all_finished,\n prefect.triggers.manual_only,\n prefect.triggers.always_run,\n prefect.triggers.all_successful,\n prefect.triggers.all_failed,\n prefect.triggers.any_successful,\n prefect.triggers.any_failed,\n prefect.triggers.some_failed,\n prefect.triggers.some_successful,\n ],\n # don't reject custom functions, just leave them as strings\n reject_invalid=False,\n allow_none=True,\n )\n skip_on_upstream_skip = fields.Boolean(allow_none=True)\n cache_for = fields.TimeDelta(allow_none=True)\n cache_key = fields.String(allow_none=True)\n cache_validator = StatefulFunctionReference(\n valid_functions=[\n prefect.engine.cache_validators.never_use,\n prefect.engine.cache_validators.duration_only,\n prefect.engine.cache_validators.all_inputs,\n prefect.engine.cache_validators.all_parameters,\n prefect.engine.cache_validators.partial_inputs_only,\n prefect.engine.cache_validators.partial_parameters_only,\n ],\n # don't reject custom functions, just leave them as strings\n reject_invalid=False,\n allow_none=True,\n )\n auto_generated = fields.Boolean(allow_none=True)\n\n\nclass ParameterSchema(TaskMethodsMixin, ObjectSchema):\n class Meta:\n object_class = lambda: prefect.core.parameter.Parameter # type: ignore\n exclude_fields = [\"type\", \"outputs\", \"slug\"]\n\n type = fields.Function(lambda task: to_qualified_name(type(task)), lambda x: x)\n name = fields.String(required=True)\n slug = fields.String(allow_none=True)\n default = JSONCompatible(allow_none=True)\n required = fields.Boolean(allow_none=True)\n description = fields.String(allow_none=True)\n tags = SortedList(fields.String())\n outputs = fields.Method(\"load_outputs\", allow_none=True)\n", "path": "src/prefect/serialization/task.py"}]} | 3,375 | 215 |
gh_patches_debug_15677 | rasdani/github-patches | git_diff | conan-io__conan-center-index-7531 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[sqlitecpp/3.1.1] fix sqlitecpp extension
Specify library name and version: **sqlitecpp/3.1.1**
This is also a good place to share with all of us **why you are submitting this PR** (especially if it is a new addition to ConanCenter): is it a dependency of other libraries you want to package? Are you the author of the library? Thanks!
---
- [x] I've read the [guidelines](https://github.com/conan-io/conan-center-index/blob/master/docs/how_to_add_packages.md) for contributing.
- [x] I've followed the [PEP8](https://www.python.org/dev/peps/pep-0008/) style guides for Python code in the recipes.
- [x] I've used the [latest](https://github.com/conan-io/conan/releases/latest) Conan client version.
- [ ] I've tried at least one configuration locally with the
[conan-center hook](https://github.com/conan-io/hooks.git) activated.
</issue>
<code>
[start of recipes/sqlite3/all/conanfile.py]
1 from conans import ConanFile, CMake, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4 import textwrap
5
6 required_conan_version = ">=1.33.0"
7
8
9 class ConanSqlite3(ConanFile):
10 name = "sqlite3"
11 description = "Self-contained, serverless, in-process SQL database engine."
12 url = "https://github.com/conan-io/conan-center-index"
13 homepage = "https://www.sqlite.org"
14 topics = ("conan", "sqlite", "database", "sql", "serverless")
15 license = "Unlicense"
16 generators = "cmake"
17 settings = "os", "compiler", "arch", "build_type"
18 exports_sources = ["CMakeLists.txt"]
19 options = {
20 "shared": [True, False],
21 "fPIC": [True, False],
22 "threadsafe": [0, 1, 2],
23 "enable_column_metadata": [True, False],
24 "enable_dbstat_vtab": [True, False],
25 "enable_explain_comments": [True, False],
26 "enable_fts3": [True, False],
27 "enable_fts3_parenthesis": [True, False],
28 "enable_fts4": [True, False],
29 "enable_fts5": [True, False],
30 "enable_json1": [True, False],
31 "enable_soundex": [True, False],
32 "enable_preupdate_hook": [True, False],
33 "enable_rtree": [True, False],
34 "use_alloca": [True, False],
35 "omit_load_extension": [True, False],
36 "enable_math_functions": [True, False],
37 "enable_unlock_notify": [True, False],
38 "enable_default_secure_delete": [True, False],
39 "disable_gethostuuid": [True, False],
40 "max_blob_size": "ANY",
41 "build_executable": [True, False],
42 "enable_default_vfs": [True, False],
43 }
44 default_options = {
45 "shared": False,
46 "fPIC": True,
47 "threadsafe": 1,
48 "enable_column_metadata": True,
49 "enable_dbstat_vtab": False,
50 "enable_explain_comments": False,
51 "enable_fts3": False,
52 "enable_fts3_parenthesis": False,
53 "enable_fts4": False,
54 "enable_fts5": False,
55 "enable_json1": False,
56 "enable_soundex": False,
57 "enable_preupdate_hook": False,
58 "enable_rtree": True,
59 "use_alloca": False,
60 "omit_load_extension": False,
61 "enable_math_functions": True,
62 "enable_unlock_notify": True,
63 "enable_default_secure_delete": False,
64 "disable_gethostuuid": False,
65 "max_blob_size": 1000000000,
66 "build_executable": True,
67 "enable_default_vfs": True,
68 }
69
70 _cmake = None
71
72 @property
73 def _source_subfolder(self):
74 return "source_subfolder"
75
76 @property
77 def _has_enable_math_function_option(self):
78 return tools.Version(self.version) >= "3.35.0"
79
80 def config_options(self):
81 if self.settings.os == "Windows":
82 del self.options.fPIC
83 if not self._has_enable_math_function_option:
84 del self.options.enable_math_functions
85
86 def configure(self):
87 if self.options.shared:
88 del self.options.fPIC
89 del self.settings.compiler.libcxx
90 del self.settings.compiler.cppstd
91
92 def validate(self):
93 if not self.options.enable_default_vfs and self.options.build_executable:
94 # Need to provide custom VFS code: https://www.sqlite.org/custombuild.html
95 raise ConanInvalidConfiguration("build_executable=True cannot be combined with enable_default_vfs=False")
96
97 def source(self):
98 tools.get(**self.conan_data["sources"][self.version], destination=self._source_subfolder, strip_root=True)
99
100 def _configure_cmake(self):
101 if self._cmake:
102 return self._cmake
103 self._cmake = CMake(self)
104 self._cmake.definitions["SQLITE3_VERSION"] = self.version
105 self._cmake.definitions["SQLITE3_BUILD_EXECUTABLE"] = self.options.build_executable
106 self._cmake.definitions["THREADSAFE"] = self.options.threadsafe
107 self._cmake.definitions["ENABLE_COLUMN_METADATA"] = self.options.enable_column_metadata
108 self._cmake.definitions["ENABLE_DBSTAT_VTAB"] = self.options.enable_dbstat_vtab
109 self._cmake.definitions["ENABLE_EXPLAIN_COMMENTS"] = self.options.enable_explain_comments
110 self._cmake.definitions["ENABLE_FTS3"] = self.options.enable_fts3
111 self._cmake.definitions["ENABLE_FTS3_PARENTHESIS"] = self.options.enable_fts3_parenthesis
112 self._cmake.definitions["ENABLE_FTS4"] = self.options.enable_fts4
113 self._cmake.definitions["ENABLE_FTS5"] = self.options.enable_fts5
114 self._cmake.definitions["ENABLE_JSON1"] = self.options.enable_json1
115 self._cmake.definitions["ENABLE_PREUPDATE_HOOK"] = self.options.enable_preupdate_hook
116 self._cmake.definitions["ENABLE_SOUNDEX"] = self.options.enable_soundex
117 self._cmake.definitions["ENABLE_RTREE"] = self.options.enable_rtree
118 self._cmake.definitions["ENABLE_UNLOCK_NOTIFY"] = self.options.enable_unlock_notify
119 self._cmake.definitions["ENABLE_DEFAULT_SECURE_DELETE"] = self.options.enable_default_secure_delete
120 self._cmake.definitions["USE_ALLOCA"] = self.options.use_alloca
121 self._cmake.definitions["OMIT_LOAD_EXTENSION"] = self.options.omit_load_extension
122 if self._has_enable_math_function_option:
123 self._cmake.definitions["ENABLE_MATH_FUNCTIONS"] = self.options.enable_math_functions
124 self._cmake.definitions["HAVE_FDATASYNC"] = True
125 self._cmake.definitions["HAVE_GMTIME_R"] = True
126 self._cmake.definitions["HAVE_LOCALTIME_R"] = self.settings.os != "Windows"
127 self._cmake.definitions["HAVE_POSIX_FALLOCATE"] = not (self.settings.os in ["Windows", "Android"] or tools.is_apple_os(self.settings.os))
128 self._cmake.definitions["HAVE_STRERROR_R"] = True
129 self._cmake.definitions["HAVE_USLEEP"] = True
130 self._cmake.definitions["DISABLE_GETHOSTUUID"] = self.options.disable_gethostuuid
131 self._cmake.definitions["MAX_BLOB_SIZE"] = self.options.max_blob_size
132 self._cmake.definitions["DISABLE_DEFAULT_VFS"] = not self.options.enable_default_vfs
133 self._cmake.configure()
134 return self._cmake
135
136 def build(self):
137 cmake = self._configure_cmake()
138 cmake.build()
139
140 def package(self):
141 header = tools.load(os.path.join(self._source_subfolder, "sqlite3.h"))
142 license_content = header[3:header.find("***", 1)]
143 tools.save(os.path.join(self.package_folder, "licenses", "LICENSE"), license_content)
144 cmake = self._configure_cmake()
145 cmake.install()
146 self._create_cmake_module_variables(
147 os.path.join(self.package_folder, self._module_file_rel_path)
148 )
149
150 @staticmethod
151 def _create_cmake_module_variables(module_file):
152 content = textwrap.dedent("""\
153 if(DEFINED SQLite_INCLUDE_DIRS)
154 set(SQLite3_INCLUDE_DIRS ${SQLite_INCLUDE_DIRS})
155 endif()
156 if(DEFINED SQLite_LIBRARIES)
157 set(SQLite3_LIBRARIES ${SQLite_LIBRARIES})
158 endif()
159 """)
160 tools.save(module_file, content)
161
162 @property
163 def _module_subfolder(self):
164 return os.path.join("lib", "cmake")
165
166 @property
167 def _module_file_rel_path(self):
168 return os.path.join(self._module_subfolder,
169 "conan-official-{}-variables.cmake".format(self.name))
170
171 def package_info(self):
172 self.cpp_info.filenames["cmake_find_package"] = "SQLite3"
173 self.cpp_info.filenames["cmake_find_package_multi"] = "SQLite3"
174 self.cpp_info.names["cmake_find_package"] = "SQLite"
175 self.cpp_info.names["cmake_find_package_multi"] = "SQLite"
176 self.cpp_info.components["sqlite"].names["cmake_find_package"] = "SQLite3"
177 self.cpp_info.components["sqlite"].names["cmake_find_package_multi"] = "SQLite3"
178 self.cpp_info.components["sqlite"].builddirs.append(self._module_subfolder)
179 self.cpp_info.components["sqlite"].build_modules["cmake_find_package"] = [self._module_file_rel_path]
180 self.cpp_info.components["sqlite"].libs = tools.collect_libs(self)
181 if self.settings.os in ["Linux", "FreeBSD"]:
182 if self.options.threadsafe:
183 self.cpp_info.components["sqlite"].system_libs.append("pthread")
184 if not self.options.omit_load_extension:
185 self.cpp_info.components["sqlite"].system_libs.append("dl")
186 if self.options.enable_fts5 or self.options.get_safe("enable_math_functions"):
187 self.cpp_info.components["sqlite"].system_libs.append("m")
188
189 if self.options.build_executable:
190 bin_path = os.path.join(self.package_folder, "bin")
191 self.output.info("Appending PATH env var with : {}".format(bin_path))
192 self.env_info.PATH.append(bin_path)
193
[end of recipes/sqlite3/all/conanfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/recipes/sqlite3/all/conanfile.py b/recipes/sqlite3/all/conanfile.py
--- a/recipes/sqlite3/all/conanfile.py
+++ b/recipes/sqlite3/all/conanfile.py
@@ -178,6 +178,8 @@
self.cpp_info.components["sqlite"].builddirs.append(self._module_subfolder)
self.cpp_info.components["sqlite"].build_modules["cmake_find_package"] = [self._module_file_rel_path]
self.cpp_info.components["sqlite"].libs = tools.collect_libs(self)
+ if self.options.omit_load_extension:
+ self.cpp_info.components["sqlite"].defines.append["SQLITE_OMIT_LOAD_EXTENSION"]
if self.settings.os in ["Linux", "FreeBSD"]:
if self.options.threadsafe:
self.cpp_info.components["sqlite"].system_libs.append("pthread")
| {"golden_diff": "diff --git a/recipes/sqlite3/all/conanfile.py b/recipes/sqlite3/all/conanfile.py\n--- a/recipes/sqlite3/all/conanfile.py\n+++ b/recipes/sqlite3/all/conanfile.py\n@@ -178,6 +178,8 @@\n self.cpp_info.components[\"sqlite\"].builddirs.append(self._module_subfolder)\n self.cpp_info.components[\"sqlite\"].build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.components[\"sqlite\"].libs = tools.collect_libs(self)\n+ if self.options.omit_load_extension:\n+ self.cpp_info.components[\"sqlite\"].defines.append[\"SQLITE_OMIT_LOAD_EXTENSION\"]\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n if self.options.threadsafe:\n self.cpp_info.components[\"sqlite\"].system_libs.append(\"pthread\")\n", "issue": "[sqlitecpp/3.1.1] fix sqlitecpp extension\nSpecify library name and version: **sqlitecpp/3.1.1**\r\n\r\nThis is also a good place to share with all of us **why you are submitting this PR** (specially if it is a new addition to ConanCenter): is it a dependency of other libraries you want to package? Are you the author of the library? Thanks!\r\n\r\n---\r\n\r\n- [x] I've read the [guidelines](https://github.com/conan-io/conan-center-index/blob/master/docs/how_to_add_packages.md) for contributing.\r\n- [x] I've followed the [PEP8](https://www.python.org/dev/peps/pep-0008/) style guides for Python code in the recipes.\r\n- [x] I've used the [latest](https://github.com/conan-io/conan/releases/latest) Conan client version.\r\n- [ ] I've tried at least one configuration locally with the\r\n [conan-center hook](https://github.com/conan-io/hooks.git) activated.\r\n\n", "before_files": [{"content": "from conans import ConanFile, CMake, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport textwrap\n\nrequired_conan_version = \">=1.33.0\"\n\n\nclass ConanSqlite3(ConanFile):\n name = \"sqlite3\"\n description = \"Self-contained, serverless, in-process SQL database engine.\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://www.sqlite.org\"\n topics = (\"conan\", \"sqlite\", \"database\", \"sql\", \"serverless\")\n license = \"Unlicense\"\n generators = \"cmake\"\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n exports_sources = [\"CMakeLists.txt\"]\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"threadsafe\": [0, 1, 2],\n \"enable_column_metadata\": [True, False],\n \"enable_dbstat_vtab\": [True, False],\n \"enable_explain_comments\": [True, False],\n \"enable_fts3\": [True, False],\n \"enable_fts3_parenthesis\": [True, False],\n \"enable_fts4\": [True, False],\n \"enable_fts5\": [True, False],\n \"enable_json1\": [True, False],\n \"enable_soundex\": [True, False],\n \"enable_preupdate_hook\": [True, False],\n \"enable_rtree\": [True, False],\n \"use_alloca\": [True, False],\n \"omit_load_extension\": [True, False],\n \"enable_math_functions\": [True, False],\n \"enable_unlock_notify\": [True, False],\n \"enable_default_secure_delete\": [True, False],\n \"disable_gethostuuid\": [True, False],\n \"max_blob_size\": \"ANY\",\n \"build_executable\": [True, False],\n \"enable_default_vfs\": [True, False],\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"threadsafe\": 1,\n \"enable_column_metadata\": True,\n \"enable_dbstat_vtab\": False,\n \"enable_explain_comments\": False,\n \"enable_fts3\": False,\n \"enable_fts3_parenthesis\": False,\n \"enable_fts4\": False,\n \"enable_fts5\": False,\n \"enable_json1\": False,\n \"enable_soundex\": False,\n 
\"enable_preupdate_hook\": False,\n \"enable_rtree\": True,\n \"use_alloca\": False,\n \"omit_load_extension\": False,\n \"enable_math_functions\": True,\n \"enable_unlock_notify\": True,\n \"enable_default_secure_delete\": False,\n \"disable_gethostuuid\": False,\n \"max_blob_size\": 1000000000,\n \"build_executable\": True,\n \"enable_default_vfs\": True,\n }\n\n _cmake = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n @property\n def _has_enable_math_function_option(self):\n return tools.Version(self.version) >= \"3.35.0\"\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n if not self._has_enable_math_function_option:\n del self.options.enable_math_functions\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def validate(self):\n if not self.options.enable_default_vfs and self.options.build_executable:\n # Need to provide custom VFS code: https://www.sqlite.org/custombuild.html\n raise ConanInvalidConfiguration(\"build_executable=True cannot be combined with enable_default_vfs=False\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], destination=self._source_subfolder, strip_root=True)\n\n def _configure_cmake(self):\n if self._cmake:\n return self._cmake\n self._cmake = CMake(self)\n self._cmake.definitions[\"SQLITE3_VERSION\"] = self.version\n self._cmake.definitions[\"SQLITE3_BUILD_EXECUTABLE\"] = self.options.build_executable\n self._cmake.definitions[\"THREADSAFE\"] = self.options.threadsafe\n self._cmake.definitions[\"ENABLE_COLUMN_METADATA\"] = self.options.enable_column_metadata\n self._cmake.definitions[\"ENABLE_DBSTAT_VTAB\"] = self.options.enable_dbstat_vtab\n self._cmake.definitions[\"ENABLE_EXPLAIN_COMMENTS\"] = self.options.enable_explain_comments\n self._cmake.definitions[\"ENABLE_FTS3\"] = self.options.enable_fts3\n self._cmake.definitions[\"ENABLE_FTS3_PARENTHESIS\"] = self.options.enable_fts3_parenthesis\n self._cmake.definitions[\"ENABLE_FTS4\"] = self.options.enable_fts4\n self._cmake.definitions[\"ENABLE_FTS5\"] = self.options.enable_fts5\n self._cmake.definitions[\"ENABLE_JSON1\"] = self.options.enable_json1\n self._cmake.definitions[\"ENABLE_PREUPDATE_HOOK\"] = self.options.enable_preupdate_hook\n self._cmake.definitions[\"ENABLE_SOUNDEX\"] = self.options.enable_soundex\n self._cmake.definitions[\"ENABLE_RTREE\"] = self.options.enable_rtree\n self._cmake.definitions[\"ENABLE_UNLOCK_NOTIFY\"] = self.options.enable_unlock_notify\n self._cmake.definitions[\"ENABLE_DEFAULT_SECURE_DELETE\"] = self.options.enable_default_secure_delete\n self._cmake.definitions[\"USE_ALLOCA\"] = self.options.use_alloca\n self._cmake.definitions[\"OMIT_LOAD_EXTENSION\"] = self.options.omit_load_extension\n if self._has_enable_math_function_option:\n self._cmake.definitions[\"ENABLE_MATH_FUNCTIONS\"] = self.options.enable_math_functions\n self._cmake.definitions[\"HAVE_FDATASYNC\"] = True\n self._cmake.definitions[\"HAVE_GMTIME_R\"] = True\n self._cmake.definitions[\"HAVE_LOCALTIME_R\"] = self.settings.os != \"Windows\"\n self._cmake.definitions[\"HAVE_POSIX_FALLOCATE\"] = not (self.settings.os in [\"Windows\", \"Android\"] or tools.is_apple_os(self.settings.os))\n self._cmake.definitions[\"HAVE_STRERROR_R\"] = True\n self._cmake.definitions[\"HAVE_USLEEP\"] = True\n self._cmake.definitions[\"DISABLE_GETHOSTUUID\"] = self.options.disable_gethostuuid\n 
self._cmake.definitions[\"MAX_BLOB_SIZE\"] = self.options.max_blob_size\n self._cmake.definitions[\"DISABLE_DEFAULT_VFS\"] = not self.options.enable_default_vfs\n self._cmake.configure()\n return self._cmake\n\n def build(self):\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n header = tools.load(os.path.join(self._source_subfolder, \"sqlite3.h\"))\n license_content = header[3:header.find(\"***\", 1)]\n tools.save(os.path.join(self.package_folder, \"licenses\", \"LICENSE\"), license_content)\n cmake = self._configure_cmake()\n cmake.install()\n self._create_cmake_module_variables(\n os.path.join(self.package_folder, self._module_file_rel_path)\n )\n\n @staticmethod\n def _create_cmake_module_variables(module_file):\n content = textwrap.dedent(\"\"\"\\\n if(DEFINED SQLite_INCLUDE_DIRS)\n set(SQLite3_INCLUDE_DIRS ${SQLite_INCLUDE_DIRS})\n endif()\n if(DEFINED SQLite_LIBRARIES)\n set(SQLite3_LIBRARIES ${SQLite_LIBRARIES})\n endif()\n \"\"\")\n tools.save(module_file, content)\n\n @property\n def _module_subfolder(self):\n return os.path.join(\"lib\", \"cmake\")\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(self._module_subfolder,\n \"conan-official-{}-variables.cmake\".format(self.name))\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"SQLite3\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"SQLite3\"\n self.cpp_info.names[\"cmake_find_package\"] = \"SQLite\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"SQLite\"\n self.cpp_info.components[\"sqlite\"].names[\"cmake_find_package\"] = \"SQLite3\"\n self.cpp_info.components[\"sqlite\"].names[\"cmake_find_package_multi\"] = \"SQLite3\"\n self.cpp_info.components[\"sqlite\"].builddirs.append(self._module_subfolder)\n self.cpp_info.components[\"sqlite\"].build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.components[\"sqlite\"].libs = tools.collect_libs(self)\n if self.settings.os in [\"Linux\", \"FreeBSD\"]:\n if self.options.threadsafe:\n self.cpp_info.components[\"sqlite\"].system_libs.append(\"pthread\")\n if not self.options.omit_load_extension:\n self.cpp_info.components[\"sqlite\"].system_libs.append(\"dl\")\n if self.options.enable_fts5 or self.options.get_safe(\"enable_math_functions\"):\n self.cpp_info.components[\"sqlite\"].system_libs.append(\"m\")\n\n if self.options.build_executable:\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var with : {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/sqlite3/all/conanfile.py"}]} | 3,342 | 190 |
gh_patches_debug_2269 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-2501 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
AASIN and isfdb not editable
Somehow during the merge some code must have gotten lost, because...
<img width="640" alt="Screenshot 2022-12-11 at 21 29 47" src="https://user-images.githubusercontent.com/2017105/206927195-f9b27bcc-2f3a-46eb-ab1d-84340e5fa061.png">
</issue>
<code>
[start of bookwyrm/forms/books.py]
1 """ using django model forms """
2 from django import forms
3
4 from bookwyrm import models
5 from bookwyrm.models.fields import ClearableFileInputWithWarning
6 from .custom_form import CustomForm
7 from .widgets import ArrayWidget, SelectDateWidget, Select
8
9
10 # pylint: disable=missing-class-docstring
11 class CoverForm(CustomForm):
12 class Meta:
13 model = models.Book
14 fields = ["cover"]
15 help_texts = {f: None for f in fields}
16
17
18 class EditionForm(CustomForm):
19 class Meta:
20 model = models.Edition
21 fields = [
22 "title",
23 "subtitle",
24 "description",
25 "series",
26 "series_number",
27 "languages",
28 "subjects",
29 "publishers",
30 "first_published_date",
31 "published_date",
32 "cover",
33 "physical_format",
34 "physical_format_detail",
35 "pages",
36 "isbn_13",
37 "isbn_10",
38 "openlibrary_key",
39 "inventaire_id",
40 "goodreads_key",
41 "oclc_number",
42 "asin",
43 ]
44 widgets = {
45 "title": forms.TextInput(attrs={"aria-describedby": "desc_title"}),
46 "subtitle": forms.TextInput(attrs={"aria-describedby": "desc_subtitle"}),
47 "description": forms.Textarea(
48 attrs={"aria-describedby": "desc_description"}
49 ),
50 "series": forms.TextInput(attrs={"aria-describedby": "desc_series"}),
51 "series_number": forms.TextInput(
52 attrs={"aria-describedby": "desc_series_number"}
53 ),
54 "subjects": ArrayWidget(),
55 "languages": forms.TextInput(
56 attrs={"aria-describedby": "desc_languages_help desc_languages"}
57 ),
58 "publishers": forms.TextInput(
59 attrs={"aria-describedby": "desc_publishers_help desc_publishers"}
60 ),
61 "first_published_date": SelectDateWidget(
62 attrs={"aria-describedby": "desc_first_published_date"}
63 ),
64 "published_date": SelectDateWidget(
65 attrs={"aria-describedby": "desc_published_date"}
66 ),
67 "cover": ClearableFileInputWithWarning(
68 attrs={"aria-describedby": "desc_cover"}
69 ),
70 "physical_format": Select(
71 attrs={"aria-describedby": "desc_physical_format"}
72 ),
73 "physical_format_detail": forms.TextInput(
74 attrs={"aria-describedby": "desc_physical_format_detail"}
75 ),
76 "pages": forms.NumberInput(attrs={"aria-describedby": "desc_pages"}),
77 "isbn_13": forms.TextInput(attrs={"aria-describedby": "desc_isbn_13"}),
78 "isbn_10": forms.TextInput(attrs={"aria-describedby": "desc_isbn_10"}),
79 "openlibrary_key": forms.TextInput(
80 attrs={"aria-describedby": "desc_openlibrary_key"}
81 ),
82 "inventaire_id": forms.TextInput(
83 attrs={"aria-describedby": "desc_inventaire_id"}
84 ),
85 "goodreads_key": forms.TextInput(
86 attrs={"aria-describedby": "desc_goodreads_key"}
87 ),
88 "oclc_number": forms.TextInput(
89 attrs={"aria-describedby": "desc_oclc_number"}
90 ),
91 "ASIN": forms.TextInput(attrs={"aria-describedby": "desc_ASIN"}),
92 "AASIN": forms.TextInput(attrs={"aria-describedby": "desc_AASIN"}),
93 "isfdb": forms.TextInput(attrs={"aria-describedby": "desc_isfdb"}),
94 }
95
96
97 class EditionFromWorkForm(CustomForm):
98 def __init__(self, *args, **kwargs):
99 super().__init__(*args, **kwargs)
100 # make all fields hidden
101 for visible in self.visible_fields():
102 visible.field.widget = forms.HiddenInput()
103
104 class Meta:
105 model = models.Work
106 fields = [
107 "title",
108 "subtitle",
109 "authors",
110 "description",
111 "languages",
112 "series",
113 "series_number",
114 "subjects",
115 "subject_places",
116 "cover",
117 "first_published_date",
118 ]
119
[end of bookwyrm/forms/books.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/forms/books.py b/bookwyrm/forms/books.py
--- a/bookwyrm/forms/books.py
+++ b/bookwyrm/forms/books.py
@@ -40,6 +40,8 @@
"goodreads_key",
"oclc_number",
"asin",
+ "aasin",
+ "isfdb",
]
widgets = {
"title": forms.TextInput(attrs={"aria-describedby": "desc_title"}),
| {"golden_diff": "diff --git a/bookwyrm/forms/books.py b/bookwyrm/forms/books.py\n--- a/bookwyrm/forms/books.py\n+++ b/bookwyrm/forms/books.py\n@@ -40,6 +40,8 @@\n \"goodreads_key\",\n \"oclc_number\",\n \"asin\",\n+ \"aasin\",\n+ \"isfdb\",\n ]\n widgets = {\n \"title\": forms.TextInput(attrs={\"aria-describedby\": \"desc_title\"}),\n", "issue": "AASIN and isfdb not editable\nSomehow during the merge some code most be gone lost because...\r\n\r\n<img width=\"640\" alt=\"Bildschirm\u00adfoto 2022-12-11 um 21 29 47\" src=\"https://user-images.githubusercontent.com/2017105/206927195-f9b27bcc-2f3a-46eb-ab1d-84340e5fa061.png\">\r\n\n", "before_files": [{"content": "\"\"\" using django model forms \"\"\"\nfrom django import forms\n\nfrom bookwyrm import models\nfrom bookwyrm.models.fields import ClearableFileInputWithWarning\nfrom .custom_form import CustomForm\nfrom .widgets import ArrayWidget, SelectDateWidget, Select\n\n\n# pylint: disable=missing-class-docstring\nclass CoverForm(CustomForm):\n class Meta:\n model = models.Book\n fields = [\"cover\"]\n help_texts = {f: None for f in fields}\n\n\nclass EditionForm(CustomForm):\n class Meta:\n model = models.Edition\n fields = [\n \"title\",\n \"subtitle\",\n \"description\",\n \"series\",\n \"series_number\",\n \"languages\",\n \"subjects\",\n \"publishers\",\n \"first_published_date\",\n \"published_date\",\n \"cover\",\n \"physical_format\",\n \"physical_format_detail\",\n \"pages\",\n \"isbn_13\",\n \"isbn_10\",\n \"openlibrary_key\",\n \"inventaire_id\",\n \"goodreads_key\",\n \"oclc_number\",\n \"asin\",\n ]\n widgets = {\n \"title\": forms.TextInput(attrs={\"aria-describedby\": \"desc_title\"}),\n \"subtitle\": forms.TextInput(attrs={\"aria-describedby\": \"desc_subtitle\"}),\n \"description\": forms.Textarea(\n attrs={\"aria-describedby\": \"desc_description\"}\n ),\n \"series\": forms.TextInput(attrs={\"aria-describedby\": \"desc_series\"}),\n \"series_number\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_series_number\"}\n ),\n \"subjects\": ArrayWidget(),\n \"languages\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_languages_help desc_languages\"}\n ),\n \"publishers\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_publishers_help desc_publishers\"}\n ),\n \"first_published_date\": SelectDateWidget(\n attrs={\"aria-describedby\": \"desc_first_published_date\"}\n ),\n \"published_date\": SelectDateWidget(\n attrs={\"aria-describedby\": \"desc_published_date\"}\n ),\n \"cover\": ClearableFileInputWithWarning(\n attrs={\"aria-describedby\": \"desc_cover\"}\n ),\n \"physical_format\": Select(\n attrs={\"aria-describedby\": \"desc_physical_format\"}\n ),\n \"physical_format_detail\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_physical_format_detail\"}\n ),\n \"pages\": forms.NumberInput(attrs={\"aria-describedby\": \"desc_pages\"}),\n \"isbn_13\": forms.TextInput(attrs={\"aria-describedby\": \"desc_isbn_13\"}),\n \"isbn_10\": forms.TextInput(attrs={\"aria-describedby\": \"desc_isbn_10\"}),\n \"openlibrary_key\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_openlibrary_key\"}\n ),\n \"inventaire_id\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_inventaire_id\"}\n ),\n \"goodreads_key\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_goodreads_key\"}\n ),\n \"oclc_number\": forms.TextInput(\n attrs={\"aria-describedby\": \"desc_oclc_number\"}\n ),\n \"ASIN\": forms.TextInput(attrs={\"aria-describedby\": \"desc_ASIN\"}),\n \"AASIN\": 
forms.TextInput(attrs={\"aria-describedby\": \"desc_AASIN\"}),\n \"isfdb\": forms.TextInput(attrs={\"aria-describedby\": \"desc_isfdb\"}),\n }\n\n\nclass EditionFromWorkForm(CustomForm):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # make all fields hidden\n for visible in self.visible_fields():\n visible.field.widget = forms.HiddenInput()\n\n class Meta:\n model = models.Work\n fields = [\n \"title\",\n \"subtitle\",\n \"authors\",\n \"description\",\n \"languages\",\n \"series\",\n \"series_number\",\n \"subjects\",\n \"subject_places\",\n \"cover\",\n \"first_published_date\",\n ]\n", "path": "bookwyrm/forms/books.py"}]} | 1,729 | 97 |
gh_patches_debug_7845 | rasdani/github-patches | git_diff | aws__aws-cli-4458 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Service Catalog --support-url is not passed as a string; the CLI performs an HTTP GET instead
Hi,
While using the Service Catalog CLI, if the --support-url parameter is supplied to create-product, the CLI performs an actual HTTP GET request on the URL provided instead of passing the parameter through as a string. Because the entire HTTP response body is then used as the value, it exceeds the 2083-character limit.
Even if the quotation marks are escaped, a different error is returned; the only workaround I have found is to supply either all of the parameter values, or just the support URL parameter, through a JSON input file.
The documentation describing the behavior that could be causing this is here:
https://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html#cli-usage-parameters-file-remote
Stating that:
"
The AWS CLI also supports loading parameters from a file hosted on the internet with an http:// or https:// URL
"
Therefore I believe that the --support-url parameter is not being handled specially by the create-product command, but rather by this underlying CLI behavior.
</issue>
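The behaviour described above comes from the generic paramfile handling implemented in `awscli/paramfile.py` (shown in full below): any string value that starts with a known prefix is dereferenced, and per-argument exceptions are listed in `PARAMFILE_DISABLED`. A minimal sketch, assuming the `awscli` package from this repository is importable; the URL used here is illustrative only:

```python
from awscli.paramfile import (
    LOCAL_PREFIX_MAP,
    PARAMFILE_DISABLED,
    REMOTE_PREFIX_MAP,
    get_paramfile,
)

support_url = "https://example.com/support"  # hypothetical --support-url value

# With only the local prefixes (file:// and fileb://), the value is not treated
# as a paramfile; None means "pass the original string through unchanged".
assert get_paramfile(support_url, LOCAL_PREFIX_MAP) is None

# With the remote prefixes merged in (the default), the same value matches the
# https:// prefix and would be fetched with an HTTP GET, which is the behaviour
# the issue reports. (The fetch itself is not executed here.)
prefixes = dict(LOCAL_PREFIX_MAP)
prefixes.update(REMOTE_PREFIX_MAP)
print(any(support_url.startswith(p) for p in prefixes))  # True

# Arguments opt out of this handling via PARAMFILE_DISABLED, keyed as
# '<service>.<operation>.<param>', e.g. an entry that already exists:
print("iam.create-open-id-connect-provider.url" in PARAMFILE_DISABLED)  # True
```

Setting the `cli_follow_urlparam` configuration value to false (read in `register_uri_param_handler` below) is presumably the user-side way to disable URL fetching globally.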
<code>
[start of awscli/paramfile.py]
1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import logging
14 import os
15 import copy
16
17 from botocore.awsrequest import AWSRequest
18 from botocore.httpsession import URLLib3Session
19 from botocore.exceptions import ProfileNotFound
20 from awscli.compat import six
21
22 from awscli.compat import compat_open
23 from awscli.argprocess import ParamError
24
25
26 logger = logging.getLogger(__name__)
27
28 # These are special cased arguments that do _not_ get the
29 # special param file processing. This is typically because it
30 # refers to an actual URI of some sort and we don't want to actually
31 # download the content (i.e TemplateURL in cloudformation).
32 PARAMFILE_DISABLED = set([
33 'api-gateway.put-integration.uri',
34 'api-gateway.create-integration.integration-uri',
35 'appstream.create-stack.redirect-url',
36 'appstream.create-stack.feedback-url',
37 'appstream.update-stack.redirect-url',
38 'appstream.update-stack.feedback-url',
39 'cloudformation.create-stack.template-url',
40 'cloudformation.update-stack.template-url',
41 'cloudformation.create-stack-set.template-url',
42 'cloudformation.update-stack-set.template-url',
43 'cloudformation.create-change-set.template-url',
44 'cloudformation.validate-template.template-url',
45 'cloudformation.estimate-template-cost.template-url',
46 'cloudformation.get-template-summary.template-url',
47
48 'cloudformation.create-stack.stack-policy-url',
49 'cloudformation.update-stack.stack-policy-url',
50 'cloudformation.set-stack-policy.stack-policy-url',
51 # aws cloudformation package --template-file
52 'custom.package.template-file',
53 # aws cloudformation deploy --template-file
54 'custom.deploy.template-file',
55
56 'cloudformation.update-stack.stack-policy-during-update-url',
57 # We will want to change the event name to ``s3`` as opposed to
58 # custom in the near future along with ``s3`` to ``s3api``.
59 'custom.cp.website-redirect',
60 'custom.mv.website-redirect',
61 'custom.sync.website-redirect',
62
63 'guardduty.create-ip-set.location',
64 'guardduty.update-ip-set.location',
65 'guardduty.create-threat-intel-set.location',
66 'guardduty.update-threat-intel-set.location',
67 'comprehend.detect-dominant-language.text',
68 'comprehend.batch-detect-dominant-language.text-list',
69 'comprehend.detect-entities.text',
70 'comprehend.batch-detect-entities.text-list',
71 'comprehend.detect-key-phrases.text',
72 'comprehend.batch-detect-key-phrases.text-list',
73 'comprehend.detect-sentiment.text',
74 'comprehend.batch-detect-sentiment.text-list',
75
76 'iam.create-open-id-connect-provider.url',
77
78 'machine-learning.predict.predict-endpoint',
79
80 'mediatailor.put-playback-configuration.ad-decision-server-url',
81 'mediatailor.put-playback-configuration.slate-ad-url',
82 'mediatailor.put-playback-configuration.video-content-source-url',
83
84 'rds.copy-db-cluster-snapshot.pre-signed-url',
85 'rds.create-db-cluster.pre-signed-url',
86 'rds.copy-db-snapshot.pre-signed-url',
87 'rds.create-db-instance-read-replica.pre-signed-url',
88
89 'sagemaker.create-notebook-instance.default-code-repository',
90 'sagemaker.create-notebook-instance.additional-code-repositories',
91 'sagemaker.update-notebook-instance.default-code-repository',
92 'sagemaker.update-notebook-instance.additional-code-repositories',
93
94 'serverlessapplicationrepository.create-application.home-page-url',
95 'serverlessapplicationrepository.create-application.license-url',
96 'serverlessapplicationrepository.create-application.readme-url',
97 'serverlessapplicationrepository.create-application.source-code-url',
98 'serverlessapplicationrepository.create-application.template-url',
99 'serverlessapplicationrepository.create-application-version.source-code-url',
100 'serverlessapplicationrepository.create-application-version.template-url',
101 'serverlessapplicationrepository.update-application.home-page-url',
102 'serverlessapplicationrepository.update-application.readme-url',
103
104 'sqs.add-permission.queue-url',
105 'sqs.change-message-visibility.queue-url',
106 'sqs.change-message-visibility-batch.queue-url',
107 'sqs.delete-message.queue-url',
108 'sqs.delete-message-batch.queue-url',
109 'sqs.delete-queue.queue-url',
110 'sqs.get-queue-attributes.queue-url',
111 'sqs.list-dead-letter-source-queues.queue-url',
112 'sqs.receive-message.queue-url',
113 'sqs.remove-permission.queue-url',
114 'sqs.send-message.queue-url',
115 'sqs.send-message-batch.queue-url',
116 'sqs.set-queue-attributes.queue-url',
117 'sqs.purge-queue.queue-url',
118 'sqs.list-queue-tags.queue-url',
119 'sqs.tag-queue.queue-url',
120 'sqs.untag-queue.queue-url',
121
122 's3.copy-object.website-redirect-location',
123 's3.create-multipart-upload.website-redirect-location',
124 's3.put-object.website-redirect-location',
125
126 # Double check that this has been renamed!
127 'sns.subscribe.notification-endpoint',
128
129 'iot.create-job.document-source',
130 'translate.translate-text.text',
131
132 'workdocs.create-notification-subscription.notification-endpoint'
133 ])
134
135
136 class ResourceLoadingError(Exception):
137 pass
138
139
140 def register_uri_param_handler(session, **kwargs):
141 prefix_map = copy.deepcopy(LOCAL_PREFIX_MAP)
142 try:
143 fetch_url = session.get_scoped_config().get(
144 'cli_follow_urlparam', 'true') == 'true'
145 except ProfileNotFound:
146 # If a --profile is provided that does not exist, loading
147 # a value from get_scoped_config will crash the CLI.
148 # This function can be called as the first handler for
149 # the session-initialized event, which happens before a
150 # profile can be created, even if the command would have
151 # successfully created a profile. Instead of crashing here
152 # on a ProfileNotFound the CLI should just use 'none'.
153 fetch_url = True
154
155 if fetch_url:
156 prefix_map.update(REMOTE_PREFIX_MAP)
157
158 handler = URIArgumentHandler(prefix_map)
159 session.register('load-cli-arg', handler)
160
161
162 class URIArgumentHandler(object):
163 def __init__(self, prefixes=None):
164 if prefixes is None:
165 prefixes = copy.deepcopy(LOCAL_PREFIX_MAP)
166 prefixes.update(REMOTE_PREFIX_MAP)
167 self._prefixes = prefixes
168
169 def __call__(self, event_name, param, value, **kwargs):
170 """Handler that supports param values from URIs."""
171 cli_argument = param
172 qualified_param_name = '.'.join(event_name.split('.')[1:])
173 if qualified_param_name in PARAMFILE_DISABLED or \
174 getattr(cli_argument, 'no_paramfile', None):
175 return
176 else:
177 return self._check_for_uri_param(cli_argument, value)
178
179 def _check_for_uri_param(self, param, value):
180 if isinstance(value, list) and len(value) == 1:
181 value = value[0]
182 try:
183 return get_paramfile(value, self._prefixes)
184 except ResourceLoadingError as e:
185 raise ParamError(param.cli_name, six.text_type(e))
186
187
188 def get_paramfile(path, cases):
189 """Load parameter based on a resource URI.
190
191 It is possible to pass parameters to operations by referring
192 to files or URI's. If such a reference is detected, this
193 function attempts to retrieve the data from the file or URI
194 and returns it. If there are any errors or if the ``path``
195 does not appear to refer to a file or URI, a ``None`` is
196 returned.
197
198 :type path: str
199 :param path: The resource URI, e.g. file://foo.txt. This value
200 may also be a non resource URI, in which case ``None`` is returned.
201
202 :type cases: dict
203 :param cases: A dictionary of URI prefixes to function mappings
204 that a parameter is checked against.
205
206 :return: The loaded value associated with the resource URI.
207 If the provided ``path`` is not a resource URI, then a
208 value of ``None`` is returned.
209
210 """
211 data = None
212 if isinstance(path, six.string_types):
213 for prefix, function_spec in cases.items():
214 if path.startswith(prefix):
215 function, kwargs = function_spec
216 data = function(prefix, path, **kwargs)
217 return data
218
219
220 def get_file(prefix, path, mode):
221 file_path = os.path.expandvars(os.path.expanduser(path[len(prefix):]))
222 try:
223 with compat_open(file_path, mode) as f:
224 return f.read()
225 except UnicodeDecodeError:
226 raise ResourceLoadingError(
227 'Unable to load paramfile (%s), text contents could '
228 'not be decoded. If this is a binary file, please use the '
229 'fileb:// prefix instead of the file:// prefix.' % file_path)
230 except (OSError, IOError) as e:
231 raise ResourceLoadingError('Unable to load paramfile %s: %s' % (
232 path, e))
233
234
235 def get_uri(prefix, uri):
236 try:
237 session = URLLib3Session()
238 r = session.send(AWSRequest('GET', uri).prepare())
239 if r.status_code == 200:
240 return r.text
241 else:
242 raise ResourceLoadingError(
243 "received non 200 status code of %s" % (
244 r.status_code))
245 except Exception as e:
246 raise ResourceLoadingError('Unable to retrieve %s: %s' % (uri, e))
247
248
249 LOCAL_PREFIX_MAP = {
250 'file://': (get_file, {'mode': 'r'}),
251 'fileb://': (get_file, {'mode': 'rb'}),
252 }
253
254
255 REMOTE_PREFIX_MAP = {
256 'http://': (get_uri, {}),
257 'https://': (get_uri, {}),
258 }
259
[end of awscli/paramfile.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/paramfile.py b/awscli/paramfile.py
--- a/awscli/paramfile.py
+++ b/awscli/paramfile.py
@@ -101,6 +101,9 @@
'serverlessapplicationrepository.update-application.home-page-url',
'serverlessapplicationrepository.update-application.readme-url',
+ 'service-catalog.create-product.support-url',
+ 'service-catalog.update-product.support-url',
+
'sqs.add-permission.queue-url',
'sqs.change-message-visibility.queue-url',
'sqs.change-message-visibility-batch.queue-url',
| {"golden_diff": "diff --git a/awscli/paramfile.py b/awscli/paramfile.py\n--- a/awscli/paramfile.py\n+++ b/awscli/paramfile.py\n@@ -101,6 +101,9 @@\n 'serverlessapplicationrepository.update-application.home-page-url',\n 'serverlessapplicationrepository.update-application.readme-url',\n \n+ 'service-catalog.create-product.support-url',\n+ 'service-catalog.update-product.support-url',\n+\n 'sqs.add-permission.queue-url',\n 'sqs.change-message-visibility.queue-url',\n 'sqs.change-message-visibility-batch.queue-url',\n", "issue": "Service Catalog --support-url not being passed as String but does a HTTP get\nHi,\r\n\r\nWhile utilizing the service catalog CLI if the parameter of --support-url is used for the create-product, the CLI does an actual HTTP get request on the URL provided instead of just giving the parameter as a string. Since the URL would return the entire HTTP response it would go over the limit of 2083 characters. \r\n\r\nEven if the quotations are escaped a new error is retrieved, the only workaround I have found is to either have all the parameter values within a JSON file or to just have the support URL parameter within the JSON file.\r\n\r\nLooking into the documentation of the behavior which could be causing this is here:\r\nhttps://docs.aws.amazon.com/cli/latest/userguide/cli-usage-parameters.html#cli-usage-parameters-file-remote\r\n\r\nStating that:\r\n\"\r\nThe AWS CLI also supports loading parameters from a file hosted on the internet with an http:// or https:// URL\r\n\"\r\ntherefore I believe that the support url parameter is not being validated by the create-product CLI command but rather by the underlying CLI behavior.\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport logging\nimport os\nimport copy\n\nfrom botocore.awsrequest import AWSRequest\nfrom botocore.httpsession import URLLib3Session\nfrom botocore.exceptions import ProfileNotFound\nfrom awscli.compat import six\n\nfrom awscli.compat import compat_open\nfrom awscli.argprocess import ParamError\n\n\nlogger = logging.getLogger(__name__)\n\n# These are special cased arguments that do _not_ get the\n# special param file processing. 
This is typically because it\n# refers to an actual URI of some sort and we don't want to actually\n# download the content (i.e TemplateURL in cloudformation).\nPARAMFILE_DISABLED = set([\n 'api-gateway.put-integration.uri',\n 'api-gateway.create-integration.integration-uri',\n 'appstream.create-stack.redirect-url',\n 'appstream.create-stack.feedback-url',\n 'appstream.update-stack.redirect-url',\n 'appstream.update-stack.feedback-url',\n 'cloudformation.create-stack.template-url',\n 'cloudformation.update-stack.template-url',\n 'cloudformation.create-stack-set.template-url',\n 'cloudformation.update-stack-set.template-url',\n 'cloudformation.create-change-set.template-url',\n 'cloudformation.validate-template.template-url',\n 'cloudformation.estimate-template-cost.template-url',\n 'cloudformation.get-template-summary.template-url',\n\n 'cloudformation.create-stack.stack-policy-url',\n 'cloudformation.update-stack.stack-policy-url',\n 'cloudformation.set-stack-policy.stack-policy-url',\n # aws cloudformation package --template-file\n 'custom.package.template-file',\n # aws cloudformation deploy --template-file\n 'custom.deploy.template-file',\n\n 'cloudformation.update-stack.stack-policy-during-update-url',\n # We will want to change the event name to ``s3`` as opposed to\n # custom in the near future along with ``s3`` to ``s3api``.\n 'custom.cp.website-redirect',\n 'custom.mv.website-redirect',\n 'custom.sync.website-redirect',\n\n 'guardduty.create-ip-set.location',\n 'guardduty.update-ip-set.location',\n 'guardduty.create-threat-intel-set.location',\n 'guardduty.update-threat-intel-set.location',\n 'comprehend.detect-dominant-language.text',\n 'comprehend.batch-detect-dominant-language.text-list',\n 'comprehend.detect-entities.text',\n 'comprehend.batch-detect-entities.text-list',\n 'comprehend.detect-key-phrases.text',\n 'comprehend.batch-detect-key-phrases.text-list',\n 'comprehend.detect-sentiment.text',\n 'comprehend.batch-detect-sentiment.text-list',\n\n 'iam.create-open-id-connect-provider.url',\n\n 'machine-learning.predict.predict-endpoint',\n\n 'mediatailor.put-playback-configuration.ad-decision-server-url',\n 'mediatailor.put-playback-configuration.slate-ad-url',\n 'mediatailor.put-playback-configuration.video-content-source-url',\n\n 'rds.copy-db-cluster-snapshot.pre-signed-url',\n 'rds.create-db-cluster.pre-signed-url',\n 'rds.copy-db-snapshot.pre-signed-url',\n 'rds.create-db-instance-read-replica.pre-signed-url',\n\n 'sagemaker.create-notebook-instance.default-code-repository',\n 'sagemaker.create-notebook-instance.additional-code-repositories',\n 'sagemaker.update-notebook-instance.default-code-repository',\n 'sagemaker.update-notebook-instance.additional-code-repositories',\n\n 'serverlessapplicationrepository.create-application.home-page-url',\n 'serverlessapplicationrepository.create-application.license-url',\n 'serverlessapplicationrepository.create-application.readme-url',\n 'serverlessapplicationrepository.create-application.source-code-url',\n 'serverlessapplicationrepository.create-application.template-url',\n 'serverlessapplicationrepository.create-application-version.source-code-url',\n 'serverlessapplicationrepository.create-application-version.template-url',\n 'serverlessapplicationrepository.update-application.home-page-url',\n 'serverlessapplicationrepository.update-application.readme-url',\n\n 'sqs.add-permission.queue-url',\n 'sqs.change-message-visibility.queue-url',\n 'sqs.change-message-visibility-batch.queue-url',\n 'sqs.delete-message.queue-url',\n 
'sqs.delete-message-batch.queue-url',\n 'sqs.delete-queue.queue-url',\n 'sqs.get-queue-attributes.queue-url',\n 'sqs.list-dead-letter-source-queues.queue-url',\n 'sqs.receive-message.queue-url',\n 'sqs.remove-permission.queue-url',\n 'sqs.send-message.queue-url',\n 'sqs.send-message-batch.queue-url',\n 'sqs.set-queue-attributes.queue-url',\n 'sqs.purge-queue.queue-url',\n 'sqs.list-queue-tags.queue-url',\n 'sqs.tag-queue.queue-url',\n 'sqs.untag-queue.queue-url',\n\n 's3.copy-object.website-redirect-location',\n 's3.create-multipart-upload.website-redirect-location',\n 's3.put-object.website-redirect-location',\n\n # Double check that this has been renamed!\n 'sns.subscribe.notification-endpoint',\n\n 'iot.create-job.document-source',\n 'translate.translate-text.text',\n\n 'workdocs.create-notification-subscription.notification-endpoint'\n])\n\n\nclass ResourceLoadingError(Exception):\n pass\n\n\ndef register_uri_param_handler(session, **kwargs):\n prefix_map = copy.deepcopy(LOCAL_PREFIX_MAP)\n try:\n fetch_url = session.get_scoped_config().get(\n 'cli_follow_urlparam', 'true') == 'true'\n except ProfileNotFound:\n # If a --profile is provided that does not exist, loading\n # a value from get_scoped_config will crash the CLI.\n # This function can be called as the first handler for\n # the session-initialized event, which happens before a\n # profile can be created, even if the command would have\n # successfully created a profile. Instead of crashing here\n # on a ProfileNotFound the CLI should just use 'none'.\n fetch_url = True\n\n if fetch_url:\n prefix_map.update(REMOTE_PREFIX_MAP)\n\n handler = URIArgumentHandler(prefix_map)\n session.register('load-cli-arg', handler)\n\n\nclass URIArgumentHandler(object):\n def __init__(self, prefixes=None):\n if prefixes is None:\n prefixes = copy.deepcopy(LOCAL_PREFIX_MAP)\n prefixes.update(REMOTE_PREFIX_MAP)\n self._prefixes = prefixes\n\n def __call__(self, event_name, param, value, **kwargs):\n \"\"\"Handler that supports param values from URIs.\"\"\"\n cli_argument = param\n qualified_param_name = '.'.join(event_name.split('.')[1:])\n if qualified_param_name in PARAMFILE_DISABLED or \\\n getattr(cli_argument, 'no_paramfile', None):\n return\n else:\n return self._check_for_uri_param(cli_argument, value)\n\n def _check_for_uri_param(self, param, value):\n if isinstance(value, list) and len(value) == 1:\n value = value[0]\n try:\n return get_paramfile(value, self._prefixes)\n except ResourceLoadingError as e:\n raise ParamError(param.cli_name, six.text_type(e))\n\n\ndef get_paramfile(path, cases):\n \"\"\"Load parameter based on a resource URI.\n\n It is possible to pass parameters to operations by referring\n to files or URI's. If such a reference is detected, this\n function attempts to retrieve the data from the file or URI\n and returns it. If there are any errors or if the ``path``\n does not appear to refer to a file or URI, a ``None`` is\n returned.\n\n :type path: str\n :param path: The resource URI, e.g. file://foo.txt. 
This value\n may also be a non resource URI, in which case ``None`` is returned.\n\n :type cases: dict\n :param cases: A dictionary of URI prefixes to function mappings\n that a parameter is checked against.\n\n :return: The loaded value associated with the resource URI.\n If the provided ``path`` is not a resource URI, then a\n value of ``None`` is returned.\n\n \"\"\"\n data = None\n if isinstance(path, six.string_types):\n for prefix, function_spec in cases.items():\n if path.startswith(prefix):\n function, kwargs = function_spec\n data = function(prefix, path, **kwargs)\n return data\n\n\ndef get_file(prefix, path, mode):\n file_path = os.path.expandvars(os.path.expanduser(path[len(prefix):]))\n try:\n with compat_open(file_path, mode) as f:\n return f.read()\n except UnicodeDecodeError:\n raise ResourceLoadingError(\n 'Unable to load paramfile (%s), text contents could '\n 'not be decoded. If this is a binary file, please use the '\n 'fileb:// prefix instead of the file:// prefix.' % file_path)\n except (OSError, IOError) as e:\n raise ResourceLoadingError('Unable to load paramfile %s: %s' % (\n path, e))\n\n\ndef get_uri(prefix, uri):\n try:\n session = URLLib3Session()\n r = session.send(AWSRequest('GET', uri).prepare())\n if r.status_code == 200:\n return r.text\n else:\n raise ResourceLoadingError(\n \"received non 200 status code of %s\" % (\n r.status_code))\n except Exception as e:\n raise ResourceLoadingError('Unable to retrieve %s: %s' % (uri, e))\n\n\nLOCAL_PREFIX_MAP = {\n 'file://': (get_file, {'mode': 'r'}),\n 'fileb://': (get_file, {'mode': 'rb'}),\n}\n\n\nREMOTE_PREFIX_MAP = {\n 'http://': (get_uri, {}),\n 'https://': (get_uri, {}),\n}\n", "path": "awscli/paramfile.py"}]} | 3,705 | 132 |
gh_patches_debug_2626 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-7221 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[CT-1943] Loosen pin on `jsonschema` (via `hologram`)
For more context on our latest thinking around dependencies (how & why we pin today, and how we want it to change):
- https://github.com/dbt-labs/dbt-core/discussions/6495
### Summary
`dbt-core` depends on `hologram`, and as such it also includes `hologram`'s transitive dependencies on `jsonschema` and `python-dateutil`. `hologram`'s upper bound on `jsonschema` in particular is causing issues for some folks trying to install `dbt-core` alongside other popular tools, such as Airflow:
- https://github.com/dbt-labs/hologram/issues/52
- https://github.com/dbt-labs/hologram/pull/51
### Short term
- Try removing upper bound on `jsonschema`
- Release a new version of `hologram` with no / looser upper bound
- Support the new version of `hologram` [in `dbt-core`](https://github.com/dbt-labs/dbt-core/blob/a8abc496323f741d3218d298d5d2bb118fa01017/core/setup.py#L54)
### Medium term
Remove `dbt-core`'s dependency on `hologram` entirely. It doesn't do nearly as much for us today as it used to, and the validation errors it raises aren't even all that nice.
- https://github.com/dbt-labs/dbt-core/issues/6776
</issue>
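Since the conflict is purely about declared version ranges, a small diagnostic sketch can make it visible. This is not part of dbt-core and assumes Python 3.8+ for `importlib.metadata`; it prints the installed versions and hologram's declared `jsonschema` requirement:

```python
from importlib.metadata import PackageNotFoundError, requires, version

for pkg in ("dbt-core", "hologram", "jsonschema", "python-dateutil"):
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: not installed")

try:
    # requires() returns the raw requirement strings declared by the package
    declared = [req for req in (requires("hologram") or []) if "jsonschema" in req]
    print("hologram declares:", declared)
except PackageNotFoundError:
    print("hologram: not installed")
```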
<code>
[start of core/setup.py]
1 #!/usr/bin/env python
2 import os
3 import sys
4
5 if sys.version_info < (3, 7, 2):
6 print("Error: dbt does not support this version of Python.")
7 print("Please upgrade to Python 3.7.2 or higher.")
8 sys.exit(1)
9
10
11 from setuptools import setup
12
13 try:
14 from setuptools import find_namespace_packages
15 except ImportError:
16 # the user has a downlevel version of setuptools.
17 print("Error: dbt requires setuptools v40.1.0 or higher.")
18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again")
19 sys.exit(1)
20
21
22 this_directory = os.path.abspath(os.path.dirname(__file__))
23 with open(os.path.join(this_directory, "README.md")) as f:
24 long_description = f.read()
25
26
27 package_name = "dbt-core"
28 package_version = "1.5.0b4"
29 description = """With dbt, data analysts and engineers can build analytics \
30 the way engineers build applications."""
31
32
33 setup(
34 name=package_name,
35 version=package_version,
36 description=description,
37 long_description=long_description,
38 long_description_content_type="text/markdown",
39 author="dbt Labs",
40 author_email="[email protected]",
41 url="https://github.com/dbt-labs/dbt-core",
42 packages=find_namespace_packages(include=["dbt", "dbt.*"]),
43 include_package_data=True,
44 test_suite="test",
45 entry_points={
46 "console_scripts": ["dbt = dbt.cli.main:cli"],
47 },
48 install_requires=[
49 "Jinja2==3.1.2",
50 "agate>=1.6,<1.7.1",
51 "click>=7.0,<9",
52 "colorama>=0.3.9,<0.4.7",
53 "hologram>=0.0.14,<=0.0.15",
54 "isodate>=0.6,<0.7",
55 "logbook>=1.5,<1.6",
56 "mashumaro[msgpack]==3.3.1",
57 "minimal-snowplow-tracker==0.0.2",
58 "networkx>=2.3,<2.8.1;python_version<'3.8'",
59 "networkx>=2.3,<3;python_version>='3.8'",
60 "packaging>20.9",
61 "sqlparse>=0.2.3,<0.5",
62 "dbt-extractor~=0.4.1",
63 "typing-extensions>=3.7.4",
64 "werkzeug>=1,<3",
65 "pathspec>=0.9,<0.12",
66 "protobuf>=3.18.3",
67 "pytz>=2015.7",
68 # the following are all to match snowflake-connector-python
69 "requests<3.0.0",
70 "idna>=2.5,<4",
71 "cffi>=1.9,<2.0.0",
72 "pyyaml>=6.0",
73 ],
74 zip_safe=False,
75 classifiers=[
76 "Development Status :: 5 - Production/Stable",
77 "License :: OSI Approved :: Apache Software License",
78 "Operating System :: Microsoft :: Windows",
79 "Operating System :: MacOS :: MacOS X",
80 "Operating System :: POSIX :: Linux",
81 "Programming Language :: Python :: 3.7",
82 "Programming Language :: Python :: 3.8",
83 "Programming Language :: Python :: 3.9",
84 "Programming Language :: Python :: 3.10",
85 "Programming Language :: Python :: 3.11",
86 ],
87 python_requires=">=3.7.2",
88 )
89
[end of core/setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/setup.py b/core/setup.py
--- a/core/setup.py
+++ b/core/setup.py
@@ -50,7 +50,7 @@
"agate>=1.6,<1.7.1",
"click>=7.0,<9",
"colorama>=0.3.9,<0.4.7",
- "hologram>=0.0.14,<=0.0.15",
+ "hologram>=0.0.14,<=0.0.16",
"isodate>=0.6,<0.7",
"logbook>=1.5,<1.6",
"mashumaro[msgpack]==3.3.1",
| {"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -50,7 +50,7 @@\n \"agate>=1.6,<1.7.1\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.7\",\n- \"hologram>=0.0.14,<=0.0.15\",\n+ \"hologram>=0.0.14,<=0.0.16\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.3.1\",\n", "issue": "[CT-1943] Loosen pin on `jsonschema` (via `hologram`)\nFor more context on our latest thinking around dependencies (how & why we pin today, and how we want it to change):\r\n- https://github.com/dbt-labs/dbt-core/discussions/6495\r\n\r\n### Summary\r\n\r\n`dbt-core` depends on `hologram`, and as such it also includes `hologram`'s transitive dependencies on `jsonschema` and `python-dateutil`. `hologram`'s upper bound on `jsonschema` in particular is causing issues for some folks trying to install `dbt-core` alongside other popular tools, such as Airflow:\r\n- https://github.com/dbt-labs/hologram/issues/52\r\n- https://github.com/dbt-labs/hologram/pull/51\r\n\r\n### Short term\r\n\r\n- Try removing upper bound on `jsonschema`\r\n- Release a new version of `hologram` with no / looser upper bound\r\n- Support the new version of `hologram` [in `dbt-core`](https://github.com/dbt-labs/dbt-core/blob/a8abc496323f741d3218d298d5d2bb118fa01017/core/setup.py#L54)\r\n\r\n### Medium term\r\n\r\nRemove `dbt-core`'s dependency on `hologram` entirely. It doesn't do nearly as much for us today as it used to, and the validation errors it raises aren't even all that nice.\r\n- https://github.com/dbt-labs/dbt-core/issues/6776\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 7, 2):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.7.2 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.5.0b4\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.cli.main:cli\"],\n },\n install_requires=[\n \"Jinja2==3.1.2\",\n \"agate>=1.6,<1.7.1\",\n \"click>=7.0,<9\",\n \"colorama>=0.3.9,<0.4.7\",\n \"hologram>=0.0.14,<=0.0.15\",\n \"isodate>=0.6,<0.7\",\n \"logbook>=1.5,<1.6\",\n \"mashumaro[msgpack]==3.3.1\",\n \"minimal-snowplow-tracker==0.0.2\",\n \"networkx>=2.3,<2.8.1;python_version<'3.8'\",\n \"networkx>=2.3,<3;python_version>='3.8'\",\n \"packaging>20.9\",\n \"sqlparse>=0.2.3,<0.5\",\n \"dbt-extractor~=0.4.1\",\n \"typing-extensions>=3.7.4\",\n \"werkzeug>=1,<3\",\n \"pathspec>=0.9,<0.12\",\n \"protobuf>=3.18.3\",\n 
\"pytz>=2015.7\",\n # the following are all to match snowflake-connector-python\n \"requests<3.0.0\",\n \"idna>=2.5,<4\",\n \"cffi>=1.9,<2.0.0\",\n \"pyyaml>=6.0\",\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.7.2\",\n)\n", "path": "core/setup.py"}]} | 1,870 | 162 |
gh_patches_debug_31646 | rasdani/github-patches | git_diff | lightly-ai__lightly-272 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
CO2 regularizer breaks with PyTorch 1.4.0
# CO2 regularizer breaks with PyTorch 1.4.0
The `KLDivLoss` in PyTorch only received the `log_target` flag after 1.4.0 so we need to handle the case where the flag is not available.
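
A minimal sketch of the usual guard for a keyword that older releases reject (the `exp()` fallback is the standard way to feed plain probabilities when `log_target` is unavailable, not something specific to this repo):

```python
import torch

# Build KLDivLoss with log_target when the installed PyTorch supports it.
try:
    kl_div = torch.nn.KLDivLoss(reduction="batchmean", log_target=True)
    log_target = True
except TypeError:  # PyTorch <= 1.4.0 has no log_target keyword
    kl_div = torch.nn.KLDivLoss(reduction="batchmean")
    log_target = False

def symmetric_kl(p, q):
    # p and q are log-probabilities.
    if log_target:
        return kl_div(p, q) + kl_div(q, p)
    # Without log_target the target must be plain probabilities.
    return kl_div(p, torch.exp(q)) + kl_div(q, torch.exp(p))
```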
</issue>
<code>
[start of lightly/loss/regularizer/co2.py]
1 """ CO2 Regularizer """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 import torch
7 from lightly.loss.memory_bank import MemoryBankModule
8
9
10 class CO2Regularizer(MemoryBankModule):
11 """Implementation of the CO2 regularizer [0] for self-supervised learning.
12
13 [0] CO2, 2021, https://arxiv.org/abs/2010.02217
14
15 Attributes:
16 alpha:
17 Weight of the regularization term.
18 t_consistency:
19 Temperature used during softmax calculations.
20 memory_bank_size:
21 Number of negative samples to store in the memory bank.
22 Use 0 to use the second batch for negative samples.
23
24 Examples:
25 >>> # initialize loss function for MoCo
26 >>> loss_fn = NTXentLoss(memory_bank_size=4096)
27 >>>
28 >>> # initialize CO2 regularizer
29 >>> co2 = CO2Regularizer(alpha=1.0, memory_bank_size=4096)
30 >>>
31 >>> # generate two random trasnforms of images
32 >>> t0 = transforms(images)
33 >>> t1 = transforms(images)
34 >>>
35 >>> # feed through the MoCo model
36 >>> out0, out1 = model(t0, t1)
37 >>>
38 >>> # calculate loss and apply regularizer
39 >>> loss = loss_fn(out0, out1) + co2(out0, out1)
40
41 """
42
43 def __init__(self,
44 alpha: float = 1,
45 t_consistency: float = 0.05,
46 memory_bank_size: int = 0):
47
48 super(CO2Regularizer, self).__init__(size=memory_bank_size)
49 self.kl_div = torch.nn.KLDivLoss(reduction='batchmean', log_target=True)
50 self.t_consistency = t_consistency
51 self.alpha = alpha
52
53 def _get_pseudo_labels(self,
54 out0: torch.Tensor,
55 out1: torch.Tensor,
56 negatives: torch.Tensor = None):
57 """Computes the soft pseudo labels across negative samples.
58
59 Args:
60 out0:
61 Output projections of the first set of transformed images (query).
62 Shape: bsz x n_ftrs
63 out1:
64 Output projections of the second set of transformed images (positive sample).
65 Shape: bsz x n_ftrs
66 negatives:
67 Negative samples to compare against. If this is None, the second
68 batch of images will be used as negative samples.
69 Shape: memory_bank_size x n_ftrs
70
71 Returns:
72 Log probability that a positive samples will classify each negative
73 sample as the positive sample.
74 Shape: bsz x (bsz - 1) or bsz x memory_bank_size
75
76 """
77 batch_size, _ = out0.shape
78 if negatives is None:
79 # use second batch as negative samples
80 # l_pos has shape bsz x 1 and l_neg has shape bsz x bsz
81 l_pos = torch.einsum('nc,nc->n', [out0, out1]).unsqueeze(-1)
82 l_neg = torch.einsum('nc,ck->nk', [out0, out1.t()])
83 # remove elements on the diagonal
84 # l_neg has shape bsz x (bsz - 1)
85 l_neg = l_neg.masked_select(
86 ~torch.eye(batch_size, dtype=bool, device=l_neg.device)
87 ).view(batch_size, batch_size - 1)
88 else:
89 # use memory bank as negative samples
90 # l_pos has shape bsz x 1 and l_neg has shape bsz x memory_bank_size
91 negatives = negatives.to(out0.device)
92 l_pos = torch.einsum('nc,nc->n', [out0, out1]).unsqueeze(-1)
93 l_neg = torch.einsum('nc,ck->nk', [out0, negatives.clone().detach()])
94
95 # concatenate such that positive samples are at index 0
96 logits = torch.cat([l_pos, l_neg], dim=1)
97 # divide by temperature
98 logits = logits / self.t_consistency
99
100 # the input to kl_div is expected to be log(p) and we set the
101 # flag log_target to True, so both probabilities should be passed as log
102 log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
103 return log_probs
104
105
106 def forward(self,
107 out0: torch.Tensor,
108 out1: torch.Tensor):
109 """Computes the CO2 regularization term for two model outputs.
110
111 Args:
112 out0:
113 Output projections of the first set of transformed images.
114 out1:
115 Output projections of the second set of transformed images.
116
117 Returns:
118 The regularization term multiplied by the weight factor alpha.
119
120 """
121
122 # normalize the output to length 1
123 out0 = torch.nn.functional.normalize(out0, dim=1)
124 out1 = torch.nn.functional.normalize(out1, dim=1)
125
126 # ask memory bank for negative samples and extend it with out1 if
127 # out1 requires a gradient, otherwise keep the same vectors in the
128 # memory bank (this allows for keeping the memory bank constant e.g.
129 # for evaluating the loss on the test set)
130 # if the memory_bank size is 0, negatives will be None
131 out1, negatives = \
132 super(CO2Regularizer, self).forward(out1, update=True)
133
134 # get log probabilities
135 p = self._get_pseudo_labels(out0, out1, negatives)
136 q = self._get_pseudo_labels(out1, out0, negatives)
137
138 # calculate kullback leibler divergence from log probabilities
139 return self.alpha * 0.5 * (self.kl_div(p, q) + self.kl_div(q, p))
140
[end of lightly/loss/regularizer/co2.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightly/loss/regularizer/co2.py b/lightly/loss/regularizer/co2.py
--- a/lightly/loss/regularizer/co2.py
+++ b/lightly/loss/regularizer/co2.py
@@ -46,7 +46,19 @@
memory_bank_size: int = 0):
super(CO2Regularizer, self).__init__(size=memory_bank_size)
- self.kl_div = torch.nn.KLDivLoss(reduction='batchmean', log_target=True)
+ # try-catch the KLDivLoss construction for backwards compatability
+ self.log_target = True
+ try:
+ self.kl_div = torch.nn.KLDivLoss(
+ reduction='batchmean',
+ log_target=True
+ )
+ except TypeError:
+ self.log_target = False
+ self.kl_div = torch.nn.KLDivLoss(
+ reduction='batchmean'
+ )
+
self.t_consistency = t_consistency
self.alpha = alpha
@@ -97,10 +109,8 @@
# divide by temperature
logits = logits / self.t_consistency
- # the input to kl_div is expected to be log(p) and we set the
- # flag log_target to True, so both probabilities should be passed as log
- log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
- return log_probs
+ # the input to kl_div is expected to be log(p)
+ return torch.nn.functional.log_softmax(logits, dim=-1)
def forward(self,
@@ -135,5 +145,11 @@
p = self._get_pseudo_labels(out0, out1, negatives)
q = self._get_pseudo_labels(out1, out0, negatives)
- # calculate kullback leibler divergence from log probabilities
- return self.alpha * 0.5 * (self.kl_div(p, q) + self.kl_div(q, p))
+ # calculate symmetrized kullback leibler divergence
+ if self.log_target:
+ div = self.kl_div(p, q) + self.kl_div(q, p)
+ else:
+ # can't use log_target because of early torch version
+ div = self.kl_div(p, torch.exp(q)) + self.kl_div(q, torch.exp(p))
+
+ return self.alpha * 0.5 * div
| {"golden_diff": "diff --git a/lightly/loss/regularizer/co2.py b/lightly/loss/regularizer/co2.py\n--- a/lightly/loss/regularizer/co2.py\n+++ b/lightly/loss/regularizer/co2.py\n@@ -46,7 +46,19 @@\n memory_bank_size: int = 0):\n \n super(CO2Regularizer, self).__init__(size=memory_bank_size)\n- self.kl_div = torch.nn.KLDivLoss(reduction='batchmean', log_target=True)\n+ # try-catch the KLDivLoss construction for backwards compatability\n+ self.log_target = True\n+ try:\n+ self.kl_div = torch.nn.KLDivLoss(\n+ reduction='batchmean',\n+ log_target=True\n+ )\n+ except TypeError:\n+ self.log_target = False\n+ self.kl_div = torch.nn.KLDivLoss(\n+ reduction='batchmean'\n+ )\n+\n self.t_consistency = t_consistency\n self.alpha = alpha\n \n@@ -97,10 +109,8 @@\n # divide by temperature\n logits = logits / self.t_consistency\n \n- # the input to kl_div is expected to be log(p) and we set the\n- # flag log_target to True, so both probabilities should be passed as log\n- log_probs = torch.nn.functional.log_softmax(logits, dim=-1)\n- return log_probs\n+ # the input to kl_div is expected to be log(p) \n+ return torch.nn.functional.log_softmax(logits, dim=-1)\n \n \n def forward(self,\n@@ -135,5 +145,11 @@\n p = self._get_pseudo_labels(out0, out1, negatives)\n q = self._get_pseudo_labels(out1, out0, negatives)\n \n- # calculate kullback leibler divergence from log probabilities\n- return self.alpha * 0.5 * (self.kl_div(p, q) + self.kl_div(q, p))\n+ # calculate symmetrized kullback leibler divergence\n+ if self.log_target:\n+ div = self.kl_div(p, q) + self.kl_div(q, p)\n+ else:\n+ # can't use log_target because of early torch version\n+ div = self.kl_div(p, torch.exp(q)) + self.kl_div(q, torch.exp(p))\n+\n+ return self.alpha * 0.5 * div\n", "issue": "CO2 regularizer breaks with PyTorch 1.4.0\n# CO2 regularizer breaks with PyTorch 1.4.0\r\n\r\nThe `KLDivLoss` in PyTorch only received the `log_target` flag after 1.4.0 so we need to handle the case where the flag is not available.\n", "before_files": [{"content": "\"\"\" CO2 Regularizer \"\"\"\n\n# Copyright (c) 2020. 
Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport torch\nfrom lightly.loss.memory_bank import MemoryBankModule\n\n\nclass CO2Regularizer(MemoryBankModule):\n \"\"\"Implementation of the CO2 regularizer [0] for self-supervised learning.\n\n [0] CO2, 2021, https://arxiv.org/abs/2010.02217\n\n Attributes:\n alpha:\n Weight of the regularization term.\n t_consistency:\n Temperature used during softmax calculations.\n memory_bank_size:\n Number of negative samples to store in the memory bank.\n Use 0 to use the second batch for negative samples.\n\n Examples:\n >>> # initialize loss function for MoCo\n >>> loss_fn = NTXentLoss(memory_bank_size=4096)\n >>>\n >>> # initialize CO2 regularizer\n >>> co2 = CO2Regularizer(alpha=1.0, memory_bank_size=4096)\n >>>\n >>> # generate two random trasnforms of images\n >>> t0 = transforms(images)\n >>> t1 = transforms(images)\n >>>\n >>> # feed through the MoCo model\n >>> out0, out1 = model(t0, t1)\n >>> \n >>> # calculate loss and apply regularizer\n >>> loss = loss_fn(out0, out1) + co2(out0, out1)\n\n \"\"\"\n\n def __init__(self,\n alpha: float = 1,\n t_consistency: float = 0.05,\n memory_bank_size: int = 0):\n\n super(CO2Regularizer, self).__init__(size=memory_bank_size)\n self.kl_div = torch.nn.KLDivLoss(reduction='batchmean', log_target=True)\n self.t_consistency = t_consistency\n self.alpha = alpha\n\n def _get_pseudo_labels(self,\n out0: torch.Tensor,\n out1: torch.Tensor,\n negatives: torch.Tensor = None):\n \"\"\"Computes the soft pseudo labels across negative samples.\n\n Args:\n out0:\n Output projections of the first set of transformed images (query).\n Shape: bsz x n_ftrs\n out1:\n Output projections of the second set of transformed images (positive sample).\n Shape: bsz x n_ftrs\n negatives:\n Negative samples to compare against. 
If this is None, the second\n batch of images will be used as negative samples.\n Shape: memory_bank_size x n_ftrs\n\n Returns:\n Log probability that a positive samples will classify each negative\n sample as the positive sample.\n Shape: bsz x (bsz - 1) or bsz x memory_bank_size\n\n \"\"\"\n batch_size, _ = out0.shape\n if negatives is None:\n # use second batch as negative samples\n # l_pos has shape bsz x 1 and l_neg has shape bsz x bsz\n l_pos = torch.einsum('nc,nc->n', [out0, out1]).unsqueeze(-1)\n l_neg = torch.einsum('nc,ck->nk', [out0, out1.t()])\n # remove elements on the diagonal\n # l_neg has shape bsz x (bsz - 1)\n l_neg = l_neg.masked_select(\n ~torch.eye(batch_size, dtype=bool, device=l_neg.device)\n ).view(batch_size, batch_size - 1)\n else:\n # use memory bank as negative samples\n # l_pos has shape bsz x 1 and l_neg has shape bsz x memory_bank_size\n negatives = negatives.to(out0.device)\n l_pos = torch.einsum('nc,nc->n', [out0, out1]).unsqueeze(-1)\n l_neg = torch.einsum('nc,ck->nk', [out0, negatives.clone().detach()])\n \n # concatenate such that positive samples are at index 0\n logits = torch.cat([l_pos, l_neg], dim=1)\n # divide by temperature\n logits = logits / self.t_consistency\n\n # the input to kl_div is expected to be log(p) and we set the\n # flag log_target to True, so both probabilities should be passed as log\n log_probs = torch.nn.functional.log_softmax(logits, dim=-1)\n return log_probs\n\n\n def forward(self,\n out0: torch.Tensor,\n out1: torch.Tensor):\n \"\"\"Computes the CO2 regularization term for two model outputs.\n\n Args:\n out0:\n Output projections of the first set of transformed images.\n out1:\n Output projections of the second set of transformed images.\n\n Returns:\n The regularization term multiplied by the weight factor alpha.\n\n \"\"\"\n\n # normalize the output to length 1\n out0 = torch.nn.functional.normalize(out0, dim=1)\n out1 = torch.nn.functional.normalize(out1, dim=1)\n\n # ask memory bank for negative samples and extend it with out1 if \n # out1 requires a gradient, otherwise keep the same vectors in the \n # memory bank (this allows for keeping the memory bank constant e.g.\n # for evaluating the loss on the test set)\n # if the memory_bank size is 0, negatives will be None\n out1, negatives = \\\n super(CO2Regularizer, self).forward(out1, update=True)\n \n # get log probabilities\n p = self._get_pseudo_labels(out0, out1, negatives)\n q = self._get_pseudo_labels(out1, out0, negatives)\n \n # calculate kullback leibler divergence from log probabilities\n return self.alpha * 0.5 * (self.kl_div(p, q) + self.kl_div(q, p))\n", "path": "lightly/loss/regularizer/co2.py"}]} | 2,206 | 550 |
gh_patches_debug_60480 | rasdani/github-patches | git_diff | mars-project__mars-954 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] TypeError: copy() got an unexpected keyword argument 'order'
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
When I run the example in "where.py", the code is as follows:
```
>>> import mars.tensor as mt
>>> from mars.session import new_session
>>> sess = new_session().as_default()
>>> x = mt.arange(9.).reshape(3, 3)
>>> sess.run(mt.where( x > 5 ))
```
The annotation of "many.py" says that the result should be "(array([0, 1]), array([1, 0]))", but now it throws error.
**To Reproduce**
To help us reproduce this bug, please provide information below:
1. Your Python version: Python3.7
2. The version of Mars you use: 0.3.0
3. Versions of crucial packages, such as numpy, scipy and protobuf: numpy 1.18.1, scipy 1.3.2
4. Full stack of the error.
```
runfile('C:/Users/Lenovo/Desktop/test/mars/test.py', wdir='C:/Users/Lenovo/Desktop/test/mars')
Traceback (most recent call last):
File "C:\Users\Lenovo\Desktop\test\mars\test.py", line 25, in <module>
sess.run(mt.where( x > 5 ))
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\session.py", line 183, in run
result = self._sess.run(*tileables, **kw)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\session.py", line 90, in run
res = self._executor.execute_tileables(tileables, **kw)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 392, in _wrapped
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 480, in inner
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\executor.py", line 745, in execute_tileables
tileables, tileable_graph=tileable_graph)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 392, in _wrapped
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 480, in inner
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 342, in build
tileables, tileable_graph=tileable_graph)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 392, in _wrapped
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 480, in inner
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 256, in build
self._on_tile_failure(tileable_data.op, exc_info)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 294, in inner
six.reraise(*exc_info)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\lib\six.py", line 703, in reraise
raise value
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 236, in build
tiled = self._tile(tileable_data, tileable_graph)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 330, in _tile
return super(IterativeChunkGraphBuilder, self)._tile(tileable_data, tileable_graph)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 194, in _tile
tds[0]._inplace_tile()
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\core.py", line 162, in _inplace_tile
return handler.inplace_tile(self)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 128, in inplace_tile
dispatched = self.dispatch(to_tile.op)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\utils.py", line 392, in _wrapped
return func(*args, **kwargs)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tiles.py", line 116, in dispatch
return op_cls.tile(op)
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tensor\indexing\nonzero.py", line 52, in tile
flattened = in_tensor.astype(bool).flatten()
File "D:\ProgramData\Anaconda3\lib\site-packages\mars\tensor\base\astype.py", line 146, in _astype
return tensor if not copy else tensor.copy(order=order)
TypeError: copy() got an unexpected keyword argument 'order'
```
5. Minimized code to reproduce the error.
```
>>> import mars.tensor as mt
>>> from mars.session import new_session
>>> sess = new_session().as_default()
>>> x = mt.arange(9.).reshape(3, 3)
>>> sess.run(mt.where( x > 5 ))
```
**Expected behavior**
(array([2, 2, 2]), array([0, 1, 2]))
That is what the annotation of "many.py" says.
**Additional context**
Please help me, thank you very much.
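
For what it's worth, the traceback shows `TensorNonzero.tile` passing `op.input` straight into `astype()`, and it is that object's `copy()` which rejects the `order` keyword. A minimal sketch of one way to sidestep this (assuming the raw input data object is the culprit) is to coerce the input back into a tensor first:

```python
# Sketch, inside mars/tensor/indexing/nonzero.py -> TensorNonzero.tile
from ..datasource import tensor as astensor  # already imported at module level

in_tensor = astensor(op.input)                # wrap the raw input data
flattened = in_tensor.astype(bool).flatten()  # copy(order=...) is now handled by a full tensor
```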
</issue>
<code>
[start of mars/tensor/indexing/nonzero.py]
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 # Copyright 1999-2020 Alibaba Group Holding Ltd.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import numpy as np
18
19 from ... import opcodes as OperandDef
20 from ...serialize import KeyField
21 from ...core import ExecutableTuple
22 from ..utils import recursive_tile
23 from ..operands import TensorHasInput, TensorOperandMixin
24 from ..datasource import tensor as astensor
25 from ..core import TensorOrder
26 from .unravel_index import unravel_index
27
28
29 class TensorNonzero(TensorHasInput, TensorOperandMixin):
30 _op_type_ = OperandDef.NONZERO
31
32 _input = KeyField('input')
33
34 def __init__(self, dtype=None, **kw):
35 super().__init__(_dtype=dtype, **kw)
36
37 @property
38 def output_limit(self):
39 return float('inf')
40
41 def __call__(self, a):
42 kws = [{'shape': (np.nan,), 'order': TensorOrder.C_ORDER, '_idx_': i}
43 for i in range(a.ndim)]
44 return ExecutableTuple(self.new_tensors([a], kws=kws, output_limit=len(kws)))
45
46 @classmethod
47 def tile(cls, op):
48 from ..datasource import arange
49
50 in_tensor = op.input
51
52 flattened = in_tensor.astype(bool).flatten()
53 recursive_tile(flattened)
54 indices = arange(flattened.size, dtype=np.intp, chunk_size=flattened.nsplits)
55 indices = indices[flattened]
56 dim_indices = unravel_index(indices, in_tensor.shape)
57 [recursive_tile(ind) for ind in dim_indices]
58
59 kws = [{'nsplits': ind.nsplits, 'chunks': ind.chunks, 'shape': o.shape}
60 for ind, o in zip(dim_indices, op.outputs)]
61 new_op = op.copy()
62 return new_op.new_tensors(op.inputs, kws=kws, output_limit=len(kws))
63
64
65 def nonzero(a):
66 """
67 Return the indices of the elements that are non-zero.
68
69 Returns a tuple of tensors, one for each dimension of `a`,
70 containing the indices of the non-zero elements in that
71 dimension. The values in `a` are always tested and returned.
72 The corresponding non-zero
73 values can be obtained with::
74
75 a[nonzero(a)]
76
77 To group the indices by element, rather than dimension, use::
78
79 transpose(nonzero(a))
80
81 The result of this is always a 2-D array, with a row for
82 each non-zero element.
83
84 Parameters
85 ----------
86 a : array_like
87 Input tensor.
88
89 Returns
90 -------
91 tuple_of_arrays : tuple
92 Indices of elements that are non-zero.
93
94 See Also
95 --------
96 flatnonzero :
97 Return indices that are non-zero in the flattened version of the input
98 tensor.
99 Tensor.nonzero :
100 Equivalent tensor method.
101 count_nonzero :
102 Counts the number of non-zero elements in the input tensor.
103
104 Examples
105 --------
106 >>> import mars.tensor as mt
107 >>> from mars.session import new_session
108
109 >>> sess = new_session().as_default()
110
111 >>> x = mt.array([[1,0,0], [0,2,0], [1,1,0]])
112 >>> x.execute()
113 array([[1, 0, 0],
114 [0, 2, 0],
115 [1, 1, 0]])
116 >>> sess.run(mt.nonzero(x))
117 (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
118
119 >>> x[mt.nonzero(x)].execute() # TODO(jisheng): accomplish this after fancy indexing is supported
120
121 >>> mt.transpose(mt.nonzero(x)).execute() # TODO(jisheng): accomplish this later
122
123 A common use for ``nonzero`` is to find the indices of an array, where
124 a condition is True. Given an array `a`, the condition `a` > 3 is a
125 boolean array and since False is interpreted as 0, np.nonzero(a > 3)
126 yields the indices of the `a` where the condition is true.
127
128 >>> a = mt.array([[1,2,3],[4,5,6],[7,8,9]])
129 >>> (a > 3).execute()
130 array([[False, False, False],
131 [ True, True, True],
132 [ True, True, True]])
133 >>> sess.run(mt.nonzero(a > 3))
134 (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
135
136 The ``nonzero`` method of the boolean array can also be called.
137
138 >>> sess.run((a > 3).nonzero())
139 (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
140
141 """
142 a = astensor(a)
143 op = TensorNonzero(dtype=np.dtype(np.intp))
144 return op(a)
145
[end of mars/tensor/indexing/nonzero.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mars/tensor/indexing/nonzero.py b/mars/tensor/indexing/nonzero.py
--- a/mars/tensor/indexing/nonzero.py
+++ b/mars/tensor/indexing/nonzero.py
@@ -47,7 +47,7 @@
def tile(cls, op):
from ..datasource import arange
- in_tensor = op.input
+ in_tensor = astensor(op.input)
flattened = in_tensor.astype(bool).flatten()
recursive_tile(flattened)
| {"golden_diff": "diff --git a/mars/tensor/indexing/nonzero.py b/mars/tensor/indexing/nonzero.py\n--- a/mars/tensor/indexing/nonzero.py\n+++ b/mars/tensor/indexing/nonzero.py\n@@ -47,7 +47,7 @@\n def tile(cls, op):\n from ..datasource import arange\n \n- in_tensor = op.input\n+ in_tensor = astensor(op.input)\n \n flattened = in_tensor.astype(bool).flatten()\n recursive_tile(flattened)\n", "issue": "[BUG] TypeError: copy() got an unexpected keyword argument 'order'\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\nWhen I run the example in \"where.py\", the code is as follows:\r\n\r\n```\r\n>>> import mars.tensor as mt\r\n>>> from mars.session import new_session\r\n>>> sess = new_session().as_default()\r\n>>> x = mt.arange(9.).reshape(3, 3)\r\n>>> sess.run(mt.where( x > 5 ))\r\n```\r\n\r\nThe annotation of \"many.py\" says that the result should be \"(array([0, 1]), array([1, 0]))\", but now it throws error.\r\n\r\n**To Reproduce**\r\nTo help us reproducing this bug, please provide information below:\r\n1. Your Python version: Python3.7\r\n2. The version of Mars you use: 0.3.0\r\n3. Versions of crucial packages, such as numpy, scipy and protobuf: numpy 1.18.1, scipy 1.3.2\r\n4. Full stack of the error.\r\n\r\n```\r\nrunfile('C:/Users/Lenovo/Desktop/test/mars/test.py', wdir='C:/Users/Lenovo/Desktop/test/mars')\r\nTraceback (most recent call last):\r\n\r\n File \"C:\\Users\\Lenovo\\Desktop\\test\\mars\\test.py\", line 25, in <module>\r\n sess.run(mt.where( x > 5 ))\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\session.py\", line 183, in run\r\n result = self._sess.run(*tileables, **kw)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\session.py\", line 90, in run\r\n res = self._executor.execute_tileables(tileables, **kw)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\utils.py\", line 392, in _wrapped\r\n return func(*args, **kwargs)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\utils.py\", line 480, in inner\r\n return func(*args, **kwargs)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\executor.py\", line 745, in execute_tileables\r\n tileables, tileable_graph=tileable_graph)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\utils.py\", line 392, in _wrapped\r\n return func(*args, **kwargs)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\utils.py\", line 480, in inner\r\n return func(*args, **kwargs)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tiles.py\", line 342, in build\r\n tileables, tileable_graph=tileable_graph)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\utils.py\", line 392, in _wrapped\r\n return func(*args, **kwargs)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\utils.py\", line 480, in inner\r\n return func(*args, **kwargs)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tiles.py\", line 256, in build\r\n self._on_tile_failure(tileable_data.op, exc_info)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tiles.py\", line 294, in inner\r\n six.reraise(*exc_info)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\lib\\six.py\", line 703, in reraise\r\n raise value\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tiles.py\", line 236, in build\r\n 
tiled = self._tile(tileable_data, tileable_graph)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tiles.py\", line 330, in _tile\r\n return super(IterativeChunkGraphBuilder, self)._tile(tileable_data, tileable_graph)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tiles.py\", line 194, in _tile\r\n tds[0]._inplace_tile()\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\core.py\", line 162, in _inplace_tile\r\n return handler.inplace_tile(self)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tiles.py\", line 128, in inplace_tile\r\n dispatched = self.dispatch(to_tile.op)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\utils.py\", line 392, in _wrapped\r\n return func(*args, **kwargs)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tiles.py\", line 116, in dispatch\r\n return op_cls.tile(op)\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tensor\\indexing\\nonzero.py\", line 52, in tile\r\n flattened = in_tensor.astype(bool).flatten()\r\n\r\n File \"D:\\ProgramData\\Anaconda3\\lib\\site-packages\\mars\\tensor\\base\\astype.py\", line 146, in _astype\r\n return tensor if not copy else tensor.copy(order=order)\r\n\r\nTypeError: copy() got an unexpected keyword argument 'order'\r\n\r\n```\r\n\r\n5. Minimized code to reproduce the error.\r\n\r\n```\r\n>>> import mars.tensor as mt\r\n>>> from mars.session import new_session\r\n>>> sess = new_session().as_default()\r\n>>> x = mt.arange(9.).reshape(3, 3)\r\n>>> sess.run(mt.where( x > 5 ))\r\n```\r\n\r\n**Expected behavior**\r\n(array([2, 2, 2]), array([0, 1, 2]))\r\nThat is what the annotation of \"many.py\" says.\r\n\r\n**Additional context**\r\nPlease help me, thank you very much.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2020 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom ... 
import opcodes as OperandDef\nfrom ...serialize import KeyField\nfrom ...core import ExecutableTuple\nfrom ..utils import recursive_tile\nfrom ..operands import TensorHasInput, TensorOperandMixin\nfrom ..datasource import tensor as astensor\nfrom ..core import TensorOrder\nfrom .unravel_index import unravel_index\n\n\nclass TensorNonzero(TensorHasInput, TensorOperandMixin):\n _op_type_ = OperandDef.NONZERO\n\n _input = KeyField('input')\n\n def __init__(self, dtype=None, **kw):\n super().__init__(_dtype=dtype, **kw)\n\n @property\n def output_limit(self):\n return float('inf')\n\n def __call__(self, a):\n kws = [{'shape': (np.nan,), 'order': TensorOrder.C_ORDER, '_idx_': i}\n for i in range(a.ndim)]\n return ExecutableTuple(self.new_tensors([a], kws=kws, output_limit=len(kws)))\n\n @classmethod\n def tile(cls, op):\n from ..datasource import arange\n\n in_tensor = op.input\n\n flattened = in_tensor.astype(bool).flatten()\n recursive_tile(flattened)\n indices = arange(flattened.size, dtype=np.intp, chunk_size=flattened.nsplits)\n indices = indices[flattened]\n dim_indices = unravel_index(indices, in_tensor.shape)\n [recursive_tile(ind) for ind in dim_indices]\n\n kws = [{'nsplits': ind.nsplits, 'chunks': ind.chunks, 'shape': o.shape}\n for ind, o in zip(dim_indices, op.outputs)]\n new_op = op.copy()\n return new_op.new_tensors(op.inputs, kws=kws, output_limit=len(kws))\n\n\ndef nonzero(a):\n \"\"\"\n Return the indices of the elements that are non-zero.\n\n Returns a tuple of tensors, one for each dimension of `a`,\n containing the indices of the non-zero elements in that\n dimension. The values in `a` are always tested and returned.\n The corresponding non-zero\n values can be obtained with::\n\n a[nonzero(a)]\n\n To group the indices by element, rather than dimension, use::\n\n transpose(nonzero(a))\n\n The result of this is always a 2-D array, with a row for\n each non-zero element.\n\n Parameters\n ----------\n a : array_like\n Input tensor.\n\n Returns\n -------\n tuple_of_arrays : tuple\n Indices of elements that are non-zero.\n\n See Also\n --------\n flatnonzero :\n Return indices that are non-zero in the flattened version of the input\n tensor.\n Tensor.nonzero :\n Equivalent tensor method.\n count_nonzero :\n Counts the number of non-zero elements in the input tensor.\n\n Examples\n --------\n >>> import mars.tensor as mt\n >>> from mars.session import new_session\n\n >>> sess = new_session().as_default()\n\n >>> x = mt.array([[1,0,0], [0,2,0], [1,1,0]])\n >>> x.execute()\n array([[1, 0, 0],\n [0, 2, 0],\n [1, 1, 0]])\n >>> sess.run(mt.nonzero(x))\n (array([0, 1, 2, 2]), array([0, 1, 0, 1]))\n\n >>> x[mt.nonzero(x)].execute() # TODO(jisheng): accomplish this after fancy indexing is supported\n\n >>> mt.transpose(mt.nonzero(x)).execute() # TODO(jisheng): accomplish this later\n\n A common use for ``nonzero`` is to find the indices of an array, where\n a condition is True. 
Given an array `a`, the condition `a` > 3 is a\n boolean array and since False is interpreted as 0, np.nonzero(a > 3)\n yields the indices of the `a` where the condition is true.\n\n >>> a = mt.array([[1,2,3],[4,5,6],[7,8,9]])\n >>> (a > 3).execute()\n array([[False, False, False],\n [ True, True, True],\n [ True, True, True]])\n >>> sess.run(mt.nonzero(a > 3))\n (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))\n\n The ``nonzero`` method of the boolean array can also be called.\n\n >>> sess.run((a > 3).nonzero())\n (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))\n\n \"\"\"\n a = astensor(a)\n op = TensorNonzero(dtype=np.dtype(np.intp))\n return op(a)\n", "path": "mars/tensor/indexing/nonzero.py"}]} | 3,516 | 112 |
gh_patches_debug_12289 | rasdani/github-patches | git_diff | modin-project__modin-794 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pyarrow is a dependency but is not in `install_requires`
### Describe the problem
<!-- Describe the problem clearly here. -->
The source comes from this file: https://github.com/modin-project/modin/blob/master/modin/experimental/engines/pyarrow_on_ray/io.py#L4-L5
### Source code / logs
<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->
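
Two obvious directions: declare `pyarrow` in `install_requires`, or keep it optional by only importing it when the pyarrow-on-Ray engine is actually selected. A sketch of the latter (moving the module-level imports under the existing engine check):

```python
# modin/experimental/engines/pyarrow_on_ray/io.py (sketch)
from modin import __execution_engine__

if __execution_engine__ == "Ray":
    import ray
    import pyarrow as pa        # only needed for this engine,
    import pyarrow.csv as csv   # so a plain install no longer pulls in pyarrow
```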
</issue>
<code>
[start of modin/experimental/engines/pyarrow_on_ray/io.py]
1 from io import BytesIO
2
3 import pandas
4 import pyarrow as pa
5 import pyarrow.csv as csv
6
7 from modin.backends.pyarrow.query_compiler import PyarrowQueryCompiler
8 from modin.data_management.utils import get_default_chunksize
9 from modin.engines.ray.generic.io import RayIO
10 from modin.experimental.engines.pyarrow_on_ray.frame.data import PyarrowOnRayFrame
11 from modin.experimental.engines.pyarrow_on_ray.frame.partition import (
12 PyarrowOnRayFramePartition,
13 )
14 from modin import __execution_engine__
15
16 if __execution_engine__ == "Ray":
17 import ray
18
19 @ray.remote
20 def _read_csv_with_offset_pyarrow_on_ray(
21 fname, num_splits, start, end, kwargs, header
22 ): # pragma: no cover
23 """Use a Ray task to read a chunk of a CSV into a pyarrow Table.
24 Note: Ray functions are not detected by codecov (thus pragma: no cover)
25 Args:
26 fname: The filename of the file to open.
27 num_splits: The number of splits (partitions) to separate the DataFrame into.
28 start: The start byte offset.
29 end: The end byte offset.
30 kwargs: The kwargs for the pyarrow `read_csv` function.
31 header: The header of the file.
32 Returns:
33 A list containing the split pyarrow Tables and the the number of
34 rows of the tables as the last element. This is used to determine
35 the total length of the DataFrame to build a default Index.
36 """
37 bio = open(fname, "rb")
38 # The header line for the CSV file
39 first_line = bio.readline()
40 bio.seek(start)
41 to_read = header + first_line + bio.read(end - start)
42 bio.close()
43 table = csv.read_csv(
44 BytesIO(to_read), parse_options=csv.ParseOptions(header_rows=1)
45 )
46 chunksize = get_default_chunksize(table.num_columns, num_splits)
47 chunks = [
48 pa.Table.from_arrays(table.columns[chunksize * i : chunksize * (i + 1)])
49 for i in range(num_splits)
50 ]
51 return chunks + [
52 table.num_rows,
53 pandas.Series(
54 [t.to_pandas_dtype() for t in table.schema.types],
55 index=table.schema.names,
56 ),
57 ]
58
59
60 class PyarrowOnRayIO(RayIO):
61 frame_cls = PyarrowOnRayFrame
62 frame_partition_cls = PyarrowOnRayFramePartition
63 query_compiler_cls = PyarrowQueryCompiler
64
65 read_parquet_remote_task = None
66 if __execution_engine__ == "Ray":
67 read_csv_remote_task = _read_csv_with_offset_pyarrow_on_ray
68 read_hdf_remote_task = None
69 read_feather_remote_task = None
70 read_sql_remote_task = None
71
[end of modin/experimental/engines/pyarrow_on_ray/io.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/experimental/engines/pyarrow_on_ray/io.py b/modin/experimental/engines/pyarrow_on_ray/io.py
--- a/modin/experimental/engines/pyarrow_on_ray/io.py
+++ b/modin/experimental/engines/pyarrow_on_ray/io.py
@@ -1,8 +1,6 @@
from io import BytesIO
import pandas
-import pyarrow as pa
-import pyarrow.csv as csv
from modin.backends.pyarrow.query_compiler import PyarrowQueryCompiler
from modin.data_management.utils import get_default_chunksize
@@ -15,6 +13,8 @@
if __execution_engine__ == "Ray":
import ray
+ import pyarrow as pa
+ import pyarrow.csv as csv
@ray.remote
def _read_csv_with_offset_pyarrow_on_ray(
| {"golden_diff": "diff --git a/modin/experimental/engines/pyarrow_on_ray/io.py b/modin/experimental/engines/pyarrow_on_ray/io.py\n--- a/modin/experimental/engines/pyarrow_on_ray/io.py\n+++ b/modin/experimental/engines/pyarrow_on_ray/io.py\n@@ -1,8 +1,6 @@\n from io import BytesIO\n \n import pandas\n-import pyarrow as pa\n-import pyarrow.csv as csv\n \n from modin.backends.pyarrow.query_compiler import PyarrowQueryCompiler\n from modin.data_management.utils import get_default_chunksize\n@@ -15,6 +13,8 @@\n \n if __execution_engine__ == \"Ray\":\n import ray\n+ import pyarrow as pa\n+ import pyarrow.csv as csv\n \n @ray.remote\n def _read_csv_with_offset_pyarrow_on_ray(\n", "issue": "pyarrow is a dependency but is not in `install_requires`\n\r\n### Describe the problem\r\n<!-- Describe the problem clearly here. -->\r\nThe source comes from this file: https://github.com/modin-project/modin/blob/master/modin/experimental/engines/pyarrow_on_ray/io.py#L4-L5\r\n\r\n### Source code / logs\r\n<!-- Include any logs or source code that would be helpful to diagnose the problem. If including tracebacks, please include the full traceback. Large logs and files should be attached. Try to provide a reproducible test case that is the bare minimum necessary to generate the problem. -->\r\n\n", "before_files": [{"content": "from io import BytesIO\n\nimport pandas\nimport pyarrow as pa\nimport pyarrow.csv as csv\n\nfrom modin.backends.pyarrow.query_compiler import PyarrowQueryCompiler\nfrom modin.data_management.utils import get_default_chunksize\nfrom modin.engines.ray.generic.io import RayIO\nfrom modin.experimental.engines.pyarrow_on_ray.frame.data import PyarrowOnRayFrame\nfrom modin.experimental.engines.pyarrow_on_ray.frame.partition import (\n PyarrowOnRayFramePartition,\n)\nfrom modin import __execution_engine__\n\nif __execution_engine__ == \"Ray\":\n import ray\n\n @ray.remote\n def _read_csv_with_offset_pyarrow_on_ray(\n fname, num_splits, start, end, kwargs, header\n ): # pragma: no cover\n \"\"\"Use a Ray task to read a chunk of a CSV into a pyarrow Table.\n Note: Ray functions are not detected by codecov (thus pragma: no cover)\n Args:\n fname: The filename of the file to open.\n num_splits: The number of splits (partitions) to separate the DataFrame into.\n start: The start byte offset.\n end: The end byte offset.\n kwargs: The kwargs for the pyarrow `read_csv` function.\n header: The header of the file.\n Returns:\n A list containing the split pyarrow Tables and the the number of\n rows of the tables as the last element. 
This is used to determine\n the total length of the DataFrame to build a default Index.\n \"\"\"\n bio = open(fname, \"rb\")\n # The header line for the CSV file\n first_line = bio.readline()\n bio.seek(start)\n to_read = header + first_line + bio.read(end - start)\n bio.close()\n table = csv.read_csv(\n BytesIO(to_read), parse_options=csv.ParseOptions(header_rows=1)\n )\n chunksize = get_default_chunksize(table.num_columns, num_splits)\n chunks = [\n pa.Table.from_arrays(table.columns[chunksize * i : chunksize * (i + 1)])\n for i in range(num_splits)\n ]\n return chunks + [\n table.num_rows,\n pandas.Series(\n [t.to_pandas_dtype() for t in table.schema.types],\n index=table.schema.names,\n ),\n ]\n\n\nclass PyarrowOnRayIO(RayIO):\n frame_cls = PyarrowOnRayFrame\n frame_partition_cls = PyarrowOnRayFramePartition\n query_compiler_cls = PyarrowQueryCompiler\n\n read_parquet_remote_task = None\n if __execution_engine__ == \"Ray\":\n read_csv_remote_task = _read_csv_with_offset_pyarrow_on_ray\n read_hdf_remote_task = None\n read_feather_remote_task = None\n read_sql_remote_task = None\n", "path": "modin/experimental/engines/pyarrow_on_ray/io.py"}]} | 1,410 | 182 |
gh_patches_debug_1110 | rasdani/github-patches | git_diff | dask__distributed-3910 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Variables leak virtual clients
**What happened**:
```python
def _get_number_of_clients(dask_scheduler: Optional[Scheduler] = None) -> Optional[int]:
if dask_scheduler is None:
return None
else:
return len(dask_scheduler.clients)
n_clients1 = client.run_on_scheduler(_get_number_of_clients)
assert n_clients1 is not None
for i in range(100):
future = create_some_dummy_future()
var = Variable(f"var-{i}", client)
var.set(future)
future.cancel(force=True)
var.delete()
n_clients2 = client.run_on_scheduler(_get_number_of_clients)
assert n_clients2 is not None
assert n_clients1 == n_clients2
```
This fails, because here:
https://github.com/dask/distributed/blob/1d7640b0172febf9ceef37c2c31241c66ac165eb/distributed/scheduler.py#L2333-L2347
a new virtual client for each and every variable is created but here:
https://github.com/dask/distributed/blob/1d7640b0172febf9ceef37c2c31241c66ac165eb/distributed/scheduler.py#L2349-L2361
we never clean up / prune these clients again.
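
A sketch of the kind of cleanup `VariableExtension.delete` would need (the `remove_client` call is an assumption about the scheduler API; the point is that the `variable-<name>` virtual client registered in `set` has to be retired, not just have its keys released):

```python
# distributed/variable.py -- VariableExtension.delete (sketch)
async def delete(self, comm=None, name=None, client=None):
    with log_errors():
        try:
            old = self.variables[name]
        except KeyError:
            pass
        else:
            if old["type"] == "Future":
                await self.release(old["value"], name)
        with suppress(KeyError):
            del self.waiting_conditions[name]
        with suppress(KeyError):
            del self.variables[name]
        # Assumed cleanup: drop the virtual client that set() registered via
        # client_desires_keys(..., client="variable-%s" % name).
        self.scheduler.remove_client(client="variable-%s" % name)
```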
**What you expected to happen**:
Scheduler-side state is not leaky.
**Environment**:
- Dask version: 2.17.2
- Distributed version: 2.17.0
- Python version: 3.6
- Operating System: Linux
- Install method (conda, pip, source): pip
</issue>
<code>
[start of distributed/variable.py]
1 import asyncio
2 from collections import defaultdict
3 from contextlib import suppress
4 import logging
5 import uuid
6
7 from tlz import merge
8
9 from .client import Future, Client
10 from .utils import tokey, log_errors, TimeoutError, parse_timedelta
11 from .worker import get_client
12
13 logger = logging.getLogger(__name__)
14
15
16 class VariableExtension:
17 """ An extension for the scheduler to manage queues
18
19 This adds the following routes to the scheduler
20
21 * variable-set
22 * variable-get
23 * variable-delete
24 """
25
26 def __init__(self, scheduler):
27 self.scheduler = scheduler
28 self.variables = dict()
29 self.waiting = defaultdict(set)
30 self.waiting_conditions = defaultdict(asyncio.Condition)
31 self.started = asyncio.Condition()
32
33 self.scheduler.handlers.update(
34 {"variable_set": self.set, "variable_get": self.get}
35 )
36
37 self.scheduler.stream_handlers["variable-future-release"] = self.future_release
38 self.scheduler.stream_handlers["variable_delete"] = self.delete
39
40 self.scheduler.extensions["variables"] = self
41
42 async def set(self, comm=None, name=None, key=None, data=None, client=None):
43 if key is not None:
44 record = {"type": "Future", "value": key}
45 self.scheduler.client_desires_keys(keys=[key], client="variable-%s" % name)
46 else:
47 record = {"type": "msgpack", "value": data}
48 try:
49 old = self.variables[name]
50 except KeyError:
51 pass
52 else:
53 if old["type"] == "Future" and old["value"] != key:
54 asyncio.ensure_future(self.release(old["value"], name))
55 if name not in self.variables:
56 async with self.started:
57 self.started.notify_all()
58 self.variables[name] = record
59
60 async def release(self, key, name):
61 while self.waiting[key, name]:
62 async with self.waiting_conditions[name]:
63 await self.waiting_conditions[name].wait()
64
65 self.scheduler.client_releases_keys(keys=[key], client="variable-%s" % name)
66 del self.waiting[key, name]
67
68 async def future_release(self, name=None, key=None, token=None, client=None):
69 self.waiting[key, name].remove(token)
70 if not self.waiting[key, name]:
71 async with self.waiting_conditions[name]:
72 self.waiting_conditions[name].notify_all()
73
74 async def get(self, comm=None, name=None, client=None, timeout=None):
75 start = self.scheduler.loop.time()
76 while name not in self.variables:
77 if timeout is not None:
78 left = timeout - (self.scheduler.loop.time() - start)
79 else:
80 left = None
81 if left and left < 0:
82 raise TimeoutError()
83 try:
84
85 async def _(): # Python 3.6 is odd and requires special help here
86 await self.started.acquire()
87 await self.started.wait()
88
89 await asyncio.wait_for(_(), timeout=left)
90 finally:
91 with suppress(RuntimeError): # Python 3.6 loses lock on finally clause
92 self.started.release()
93
94 record = self.variables[name]
95 if record["type"] == "Future":
96 key = record["value"]
97 token = uuid.uuid4().hex
98 ts = self.scheduler.tasks.get(key)
99 state = ts.state if ts is not None else "lost"
100 msg = {"token": token, "state": state}
101 if state == "erred":
102 msg["exception"] = ts.exception_blame.exception
103 msg["traceback"] = ts.exception_blame.traceback
104 record = merge(record, msg)
105 self.waiting[key, name].add(token)
106 return record
107
108 async def delete(self, comm=None, name=None, client=None):
109 with log_errors():
110 try:
111 old = self.variables[name]
112 except KeyError:
113 pass
114 else:
115 if old["type"] == "Future":
116 await self.release(old["value"], name)
117 with suppress(KeyError):
118 del self.waiting_conditions[name]
119 with suppress(KeyError):
120 del self.variables[name]
121
122
123 class Variable:
124 """ Distributed Global Variable
125
126 This allows multiple clients to share futures and data between each other
127 with a single mutable variable. All metadata is sequentialized through the
128 scheduler. Race conditions can occur.
129
130 Values must be either Futures or msgpack-encodable data (ints, lists,
131 strings, etc..) All data will be kept and sent through the scheduler, so
132 it is wise not to send too much. If you want to share a large amount of
133 data then ``scatter`` it and share the future instead.
134
135 .. warning::
136
137 This object is experimental and has known issues in Python 2
138
139 Parameters
140 ----------
141 name: string (optional)
142 Name used by other clients and the scheduler to identify the variable.
143 If not given, a random name will be generated.
144 client: Client (optional)
145 Client used for communication with the scheduler. Defaults to the
146 value of ``Client.current()``.
147
148 Examples
149 --------
150 >>> from dask.distributed import Client, Variable # doctest: +SKIP
151 >>> client = Client() # doctest: +SKIP
152 >>> x = Variable('x') # doctest: +SKIP
153 >>> x.set(123) # docttest: +SKIP
154 >>> x.get() # docttest: +SKIP
155 123
156 >>> future = client.submit(f, x) # doctest: +SKIP
157 >>> x.set(future) # doctest: +SKIP
158
159 See Also
160 --------
161 Queue: shared multi-producer/multi-consumer queue between clients
162 """
163
164 def __init__(self, name=None, client=None, maxsize=0):
165 self.client = client or Client.current()
166 self.name = name or "variable-" + uuid.uuid4().hex
167
168 async def _set(self, value):
169 if isinstance(value, Future):
170 await self.client.scheduler.variable_set(
171 key=tokey(value.key), name=self.name
172 )
173 else:
174 await self.client.scheduler.variable_set(data=value, name=self.name)
175
176 def set(self, value, **kwargs):
177 """ Set the value of this variable
178
179 Parameters
180 ----------
181 value: Future or object
182 Must be either a Future or a msgpack-encodable value
183 """
184 return self.client.sync(self._set, value, **kwargs)
185
186 async def _get(self, timeout=None):
187 d = await self.client.scheduler.variable_get(
188 timeout=timeout, name=self.name, client=self.client.id
189 )
190 if d["type"] == "Future":
191 value = Future(d["value"], self.client, inform=True, state=d["state"])
192 if d["state"] == "erred":
193 value._state.set_error(d["exception"], d["traceback"])
194 self.client._send_to_scheduler(
195 {
196 "op": "variable-future-release",
197 "name": self.name,
198 "key": d["value"],
199 "token": d["token"],
200 }
201 )
202 else:
203 value = d["value"]
204 return value
205
206 def get(self, timeout=None, **kwargs):
207 """ Get the value of this variable
208
209 Parameters
210 ----------
211 timeout: number or string or timedelta, optional
212 Time in seconds to wait before timing out.
213 Instead of number of seconds, it is also possible to specify
214 a timedelta in string format, e.g. "200ms".
215 """
216 timeout = parse_timedelta(timeout)
217 return self.client.sync(self._get, timeout=timeout, **kwargs)
218
219 def delete(self):
220 """ Delete this variable
221
222 Caution, this affects all clients currently pointing to this variable.
223 """
224 if self.client.status == "running": # TODO: can leave zombie futures
225 self.client._send_to_scheduler({"op": "variable_delete", "name": self.name})
226
227 def __getstate__(self):
228 return (self.name, self.client.scheduler.address)
229
230 def __setstate__(self, state):
231 name, address = state
232 try:
233 client = get_client(address)
234 assert client.scheduler.address == address
235 except (AttributeError, AssertionError):
236 client = Client(address, set_as_default=False)
237 self.__init__(name=name, client=client)
238
[end of distributed/variable.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/distributed/variable.py b/distributed/variable.py
--- a/distributed/variable.py
+++ b/distributed/variable.py
@@ -119,6 +119,8 @@
with suppress(KeyError):
del self.variables[name]
+ self.scheduler.remove_client("variable-%s" % name)
+
class Variable:
""" Distributed Global Variable
| {"golden_diff": "diff --git a/distributed/variable.py b/distributed/variable.py\n--- a/distributed/variable.py\n+++ b/distributed/variable.py\n@@ -119,6 +119,8 @@\n with suppress(KeyError):\n del self.variables[name]\n \n+ self.scheduler.remove_client(\"variable-%s\" % name)\n+\n \n class Variable:\n \"\"\" Distributed Global Variable\n", "issue": "Variables leak virtual clients\n**What happened**:\r\n\r\n```python\r\ndef _get_number_of_clients(dask_scheduler: Optional[Scheduler] = None) -> Optional[int]:\r\n if dask_scheduler is None:\r\n return None\r\n else:\r\n return len(dask_scheduler.clients)\r\n\r\nn_clients1 = client.run_on_scheduler(_get_number_of_clients)\r\nassert n_clients1 is not None\r\n\r\nfor i in range(100):\r\n future = create_some_dummy_future()\r\n var = Variable(f\"var-{i}\", client)\r\n var.set(future)\r\n future.cancel(force=True)\r\n var.delete()\r\n\r\nn_clients2 = client.run_on_scheduler(_get_number_of_clients)\r\nassert n_clients2 is not None\r\n\r\nassert n_client1 == n_clients2\r\n```\r\n\r\nThis fails, because here:\r\n\r\nhttps://github.com/dask/distributed/blob/1d7640b0172febf9ceef37c2c31241c66ac165eb/distributed/scheduler.py#L2333-L2347\r\n\r\na new virtual client for each and every variable is created but here:\r\n\r\nhttps://github.com/dask/distributed/blob/1d7640b0172febf9ceef37c2c31241c66ac165eb/distributed/scheduler.py#L2349-L2361\r\n\r\nwe never clean up / prune these clients again.\r\n\r\n**What you expected to happen**:\r\n\r\nScheduler-side state is not leaky.\r\n\r\n**Environment**:\r\n\r\n- Dask version: 2.17.2\r\n- Distributed version: 2.17.0\r\n- Python version: 3.6\r\n- Operating System: Linux\r\n- Install method (conda, pip, source): pip\r\n\n", "before_files": [{"content": "import asyncio\nfrom collections import defaultdict\nfrom contextlib import suppress\nimport logging\nimport uuid\n\nfrom tlz import merge\n\nfrom .client import Future, Client\nfrom .utils import tokey, log_errors, TimeoutError, parse_timedelta\nfrom .worker import get_client\n\nlogger = logging.getLogger(__name__)\n\n\nclass VariableExtension:\n \"\"\" An extension for the scheduler to manage queues\n\n This adds the following routes to the scheduler\n\n * variable-set\n * variable-get\n * variable-delete\n \"\"\"\n\n def __init__(self, scheduler):\n self.scheduler = scheduler\n self.variables = dict()\n self.waiting = defaultdict(set)\n self.waiting_conditions = defaultdict(asyncio.Condition)\n self.started = asyncio.Condition()\n\n self.scheduler.handlers.update(\n {\"variable_set\": self.set, \"variable_get\": self.get}\n )\n\n self.scheduler.stream_handlers[\"variable-future-release\"] = self.future_release\n self.scheduler.stream_handlers[\"variable_delete\"] = self.delete\n\n self.scheduler.extensions[\"variables\"] = self\n\n async def set(self, comm=None, name=None, key=None, data=None, client=None):\n if key is not None:\n record = {\"type\": \"Future\", \"value\": key}\n self.scheduler.client_desires_keys(keys=[key], client=\"variable-%s\" % name)\n else:\n record = {\"type\": \"msgpack\", \"value\": data}\n try:\n old = self.variables[name]\n except KeyError:\n pass\n else:\n if old[\"type\"] == \"Future\" and old[\"value\"] != key:\n asyncio.ensure_future(self.release(old[\"value\"], name))\n if name not in self.variables:\n async with self.started:\n self.started.notify_all()\n self.variables[name] = record\n\n async def release(self, key, name):\n while self.waiting[key, name]:\n async with self.waiting_conditions[name]:\n await 
self.waiting_conditions[name].wait()\n\n self.scheduler.client_releases_keys(keys=[key], client=\"variable-%s\" % name)\n del self.waiting[key, name]\n\n async def future_release(self, name=None, key=None, token=None, client=None):\n self.waiting[key, name].remove(token)\n if not self.waiting[key, name]:\n async with self.waiting_conditions[name]:\n self.waiting_conditions[name].notify_all()\n\n async def get(self, comm=None, name=None, client=None, timeout=None):\n start = self.scheduler.loop.time()\n while name not in self.variables:\n if timeout is not None:\n left = timeout - (self.scheduler.loop.time() - start)\n else:\n left = None\n if left and left < 0:\n raise TimeoutError()\n try:\n\n async def _(): # Python 3.6 is odd and requires special help here\n await self.started.acquire()\n await self.started.wait()\n\n await asyncio.wait_for(_(), timeout=left)\n finally:\n with suppress(RuntimeError): # Python 3.6 loses lock on finally clause\n self.started.release()\n\n record = self.variables[name]\n if record[\"type\"] == \"Future\":\n key = record[\"value\"]\n token = uuid.uuid4().hex\n ts = self.scheduler.tasks.get(key)\n state = ts.state if ts is not None else \"lost\"\n msg = {\"token\": token, \"state\": state}\n if state == \"erred\":\n msg[\"exception\"] = ts.exception_blame.exception\n msg[\"traceback\"] = ts.exception_blame.traceback\n record = merge(record, msg)\n self.waiting[key, name].add(token)\n return record\n\n async def delete(self, comm=None, name=None, client=None):\n with log_errors():\n try:\n old = self.variables[name]\n except KeyError:\n pass\n else:\n if old[\"type\"] == \"Future\":\n await self.release(old[\"value\"], name)\n with suppress(KeyError):\n del self.waiting_conditions[name]\n with suppress(KeyError):\n del self.variables[name]\n\n\nclass Variable:\n \"\"\" Distributed Global Variable\n\n This allows multiple clients to share futures and data between each other\n with a single mutable variable. All metadata is sequentialized through the\n scheduler. Race conditions can occur.\n\n Values must be either Futures or msgpack-encodable data (ints, lists,\n strings, etc..) All data will be kept and sent through the scheduler, so\n it is wise not to send too much. If you want to share a large amount of\n data then ``scatter`` it and share the future instead.\n\n .. warning::\n\n This object is experimental and has known issues in Python 2\n\n Parameters\n ----------\n name: string (optional)\n Name used by other clients and the scheduler to identify the variable.\n If not given, a random name will be generated.\n client: Client (optional)\n Client used for communication with the scheduler. 
Defaults to the\n value of ``Client.current()``.\n\n Examples\n --------\n >>> from dask.distributed import Client, Variable # doctest: +SKIP\n >>> client = Client() # doctest: +SKIP\n >>> x = Variable('x') # doctest: +SKIP\n >>> x.set(123) # docttest: +SKIP\n >>> x.get() # docttest: +SKIP\n 123\n >>> future = client.submit(f, x) # doctest: +SKIP\n >>> x.set(future) # doctest: +SKIP\n\n See Also\n --------\n Queue: shared multi-producer/multi-consumer queue between clients\n \"\"\"\n\n def __init__(self, name=None, client=None, maxsize=0):\n self.client = client or Client.current()\n self.name = name or \"variable-\" + uuid.uuid4().hex\n\n async def _set(self, value):\n if isinstance(value, Future):\n await self.client.scheduler.variable_set(\n key=tokey(value.key), name=self.name\n )\n else:\n await self.client.scheduler.variable_set(data=value, name=self.name)\n\n def set(self, value, **kwargs):\n \"\"\" Set the value of this variable\n\n Parameters\n ----------\n value: Future or object\n Must be either a Future or a msgpack-encodable value\n \"\"\"\n return self.client.sync(self._set, value, **kwargs)\n\n async def _get(self, timeout=None):\n d = await self.client.scheduler.variable_get(\n timeout=timeout, name=self.name, client=self.client.id\n )\n if d[\"type\"] == \"Future\":\n value = Future(d[\"value\"], self.client, inform=True, state=d[\"state\"])\n if d[\"state\"] == \"erred\":\n value._state.set_error(d[\"exception\"], d[\"traceback\"])\n self.client._send_to_scheduler(\n {\n \"op\": \"variable-future-release\",\n \"name\": self.name,\n \"key\": d[\"value\"],\n \"token\": d[\"token\"],\n }\n )\n else:\n value = d[\"value\"]\n return value\n\n def get(self, timeout=None, **kwargs):\n \"\"\" Get the value of this variable\n\n Parameters\n ----------\n timeout: number or string or timedelta, optional\n Time in seconds to wait before timing out.\n Instead of number of seconds, it is also possible to specify\n a timedelta in string format, e.g. \"200ms\".\n \"\"\"\n timeout = parse_timedelta(timeout)\n return self.client.sync(self._get, timeout=timeout, **kwargs)\n\n def delete(self):\n \"\"\" Delete this variable\n\n Caution, this affects all clients currently pointing to this variable.\n \"\"\"\n if self.client.status == \"running\": # TODO: can leave zombie futures\n self.client._send_to_scheduler({\"op\": \"variable_delete\", \"name\": self.name})\n\n def __getstate__(self):\n return (self.name, self.client.scheduler.address)\n\n def __setstate__(self, state):\n name, address = state\n try:\n client = get_client(address)\n assert client.scheduler.address == address\n except (AttributeError, AssertionError):\n client = Client(address, set_as_default=False)\n self.__init__(name=name, client=client)\n", "path": "distributed/variable.py"}]} | 3,342 | 84 |
gh_patches_debug_32373 | rasdani/github-patches | git_diff | CiviWiki__OpenCiviWiki-1156 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Consolidate Backbone templates for comprehension
We are beginning the process of porting Backbone templates to the Django template syntax. However, the Backbone templates are often spread across multiple "partial" files, where a single Backbone model is applied against several combined template fragments.
In order to make it easier to port our Backbone templates, we can combine the templates into single files, where possible.
## Task
- [x] combine Backbone template partials into monolithic template files for easier comprehension
</issue>
<code>
[start of project/accounts/views.py]
1 """
2 Class based views.
3
4 This module will include views for the accounts app.
5 """
6
7 from core.custom_decorators import full_profile, login_required
8 from django.conf import settings
9 from django.contrib.auth import get_user_model, login
10 from django.contrib.auth import views as auth_views
11 from django.contrib.auth.mixins import LoginRequiredMixin
12 from django.contrib.sites.shortcuts import get_current_site
13 from django.http import HttpResponseRedirect
14 from django.template.response import TemplateResponse
15 from django.urls import reverse_lazy
16 from django.utils.encoding import force_str
17 from django.utils.http import urlsafe_base64_decode
18 from django.views import View
19 from django.views.generic.edit import FormView, UpdateView
20
21 from accounts.authentication import account_activation_token, send_activation_email
22 from accounts.forms import ProfileEditForm, UpdateProfileImage, UserRegistrationForm
23 from accounts.models import Profile
24
25
26 class RegisterView(FormView):
27 """
28 A form view that handles user registration.
29 """
30
31 template_name = "accounts/register/register.html"
32 form_class = UserRegistrationForm
33 success_url = "/"
34
35 def _create_user(self, form):
36 username = form.cleaned_data["username"]
37 password = form.cleaned_data["password"]
38 email = form.cleaned_data["email"]
39 user = get_user_model().objects.create_user(username, email, password)
40 return user
41
42 def _send_email(self, user):
43 domain = get_current_site(self.request).domain
44 send_activation_email(user, domain)
45
46 def _login(self, user):
47 login(self.request, user)
48
49 def form_valid(self, form):
50 user = self._create_user(form)
51
52 self._send_email(user)
53 self._login(user)
54
55 return super(RegisterView, self).form_valid(form)
56
57
58 class PasswordResetView(auth_views.PasswordResetView):
59 template_name = "accounts/users/password_reset.html"
60 email_template_name = "accounts/users/password_reset_email.html"
61 subject_template_name = "accounts/users/password_reset_subject.txt"
62 from_email = settings.EMAIL_HOST_USER
63 success_url = reverse_lazy("accounts_password_reset_done")
64
65
66 class PasswordResetDoneView(auth_views.PasswordResetDoneView):
67 template_name = "accounts/users/password_reset_done.html"
68
69
70 class PasswordResetConfirmView(auth_views.PasswordResetConfirmView):
71 template_name = "accounts/users/password_reset_confirm.html"
72 success_url = reverse_lazy("accounts_password_reset_complete")
73
74
75 class PasswordResetCompleteView(auth_views.PasswordResetCompleteView):
76 template_name = "accounts/users/password_reset_complete.html"
77
78
79 class SettingsView(LoginRequiredMixin, UpdateView):
80 """A form view to edit Profile"""
81
82 login_url = "accounts_login"
83 form_class = ProfileEditForm
84 success_url = reverse_lazy("accounts_settings")
85 template_name = "accounts/utils/update_settings.html"
86
87 def get_object(self, queryset=None):
88 return Profile.objects.get(user=self.request.user)
89
90 def get_initial(self):
91 profile = Profile.objects.get(user=self.request.user)
92 self.initial.update(
93 {
94 "username": profile.user.username,
95 "email": profile.user.email,
96 "first_name": profile.first_name or None,
97 "last_name": profile.last_name or None,
98 "about_me": profile.about_me or None,
99 }
100 )
101 return super(SettingsView, self).get_initial()
102
103
104 class ProfileActivationView(View):
105 """
106 This shows different views to the user when they are verifying
107 their account based on whether they are already verified or not.
108 """
109
110 def get(self, request, uidb64, token):
111
112 User = get_user_model()
113 try:
114 uid = force_str(urlsafe_base64_decode(uidb64))
115 user = User.objects.get(pk=uid)
116
117 except (TypeError, ValueError, OverflowError, User.DoesNotExist):
118 user = None
119
120 if user is not None and account_activation_token.check_token(user, token):
121 profile = Profile.objects.get(user=user)
122 if profile.is_verified:
123 redirect_link = {"href": "/", "label": "Back to Main"}
124 template_var = {
125 "title": "Email Already Verified",
126 "content": "You have already verified your email",
127 "link": redirect_link,
128 }
129 return TemplateResponse(request, "general-message.html", template_var)
130 else:
131 profile.is_verified = True
132 profile.save()
133
134 redirect_link = {"href": "/", "label": "Back to Main"}
135 template_var = {
136 "title": "Email Verification Successful",
137 "content": "Thank you for verifying your email with CiviWiki",
138 "link": redirect_link,
139 }
140 return TemplateResponse(request, "general-message.html", template_var)
141 else:
142 # invalid link
143 redirect_link = {"href": "/", "label": "Back to Main"}
144 template_var = {
145 "title": "Email Verification Error",
146 "content": "Email could not be verified",
147 "link": redirect_link,
148 }
149 return TemplateResponse(request, "general-message.html", template_var)
150
151
152 class ProfileSetupView(LoginRequiredMixin, View):
153 """A view to make the user profile full_profile"""
154
155 login_url = "accounts_login"
156
157 def get(self, request):
158 profile = Profile.objects.get(user=request.user)
159 if profile.full_profile:
160 return HttpResponseRedirect("/")
161 # start temp rep rendering TODO: REMOVE THIS
162 else:
163 data = {
164 "username": request.user.username,
165 "email": request.user.email,
166 }
167 return TemplateResponse(request, "accounts/user-setup.html", data)
168
169
170 @login_required
171 @full_profile
172 def user_profile(request, username=None):
173 User = get_user_model()
174 if request.method == "GET":
175 if not username:
176 return HttpResponseRedirect(f"/profile/{request.user}")
177 else:
178 is_owner = username == request.user.username
179 try:
180 user = User.objects.get(username=username)
181 profile = user.profile_set.first()
182 except User.DoesNotExist:
183 return HttpResponseRedirect("/404")
184
185 form = ProfileEditForm(
186 initial={
187 "username": user.username,
188 "email": user.email,
189 "first_name": profile.first_name or None,
190 "last_name": profile.last_name or None,
191 "about_me": profile.about_me or None,
192 },
193 readonly=True,
194 )
195 data = {
196 "username": user,
197 "profile_image_form": UpdateProfileImage,
198 "form": form if is_owner else None,
199 "readonly": True,
200 }
201 return TemplateResponse(request, "account.html", data)
202
[end of project/accounts/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/project/accounts/views.py b/project/accounts/views.py
--- a/project/accounts/views.py
+++ b/project/accounts/views.py
@@ -82,7 +82,7 @@
login_url = "accounts_login"
form_class = ProfileEditForm
success_url = reverse_lazy("accounts_settings")
- template_name = "accounts/utils/update_settings.html"
+ template_name = "accounts/update_settings.html"
def get_object(self, queryset=None):
return Profile.objects.get(user=self.request.user)
@@ -126,7 +126,6 @@
"content": "You have already verified your email",
"link": redirect_link,
}
- return TemplateResponse(request, "general-message.html", template_var)
else:
profile.is_verified = True
profile.save()
@@ -137,7 +136,6 @@
"content": "Thank you for verifying your email with CiviWiki",
"link": redirect_link,
}
- return TemplateResponse(request, "general-message.html", template_var)
else:
# invalid link
redirect_link = {"href": "/", "label": "Back to Main"}
@@ -146,7 +144,8 @@
"content": "Email could not be verified",
"link": redirect_link,
}
- return TemplateResponse(request, "general-message.html", template_var)
+
+ return TemplateResponse(request, "general_message.html", template_var)
class ProfileSetupView(LoginRequiredMixin, View):
| {"golden_diff": "diff --git a/project/accounts/views.py b/project/accounts/views.py\n--- a/project/accounts/views.py\n+++ b/project/accounts/views.py\n@@ -82,7 +82,7 @@\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n- template_name = \"accounts/utils/update_settings.html\"\n+ template_name = \"accounts/update_settings.html\"\n \n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n@@ -126,7 +126,6 @@\n \"content\": \"You have already verified your email\",\n \"link\": redirect_link,\n }\n- return TemplateResponse(request, \"general-message.html\", template_var)\n else:\n profile.is_verified = True\n profile.save()\n@@ -137,7 +136,6 @@\n \"content\": \"Thank you for verifying your email with CiviWiki\",\n \"link\": redirect_link,\n }\n- return TemplateResponse(request, \"general-message.html\", template_var)\n else:\n # invalid link\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n@@ -146,7 +144,8 @@\n \"content\": \"Email could not be verified\",\n \"link\": redirect_link,\n }\n- return TemplateResponse(request, \"general-message.html\", template_var)\n+\n+ return TemplateResponse(request, \"general_message.html\", template_var)\n \n \n class ProfileSetupView(LoginRequiredMixin, View):\n", "issue": "Consolidate Backbone templates for comprehension\nWe are beginning the process of porting Backbone templates to the Django template syntax. However, the Backbone templates are often spread across multiple \"partial\" files, where a single Backbone model is applied against several combined template fragments.\r\n\r\nIn order to make it easier to port our Backbone templates, we can combine the templates into single files, where possible.\r\n\r\n## Task\r\n\r\n- [x] combine Backbone template partials into monolithic template files for easier comprehension\n", "before_files": [{"content": "\"\"\"\nClass based views.\n\nThis module will include views for the accounts app.\n\"\"\"\n\nfrom core.custom_decorators import full_profile, login_required\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model, login\nfrom django.contrib.auth import views as auth_views\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.http import HttpResponseRedirect\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse_lazy\nfrom django.utils.encoding import force_str\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.views import View\nfrom django.views.generic.edit import FormView, UpdateView\n\nfrom accounts.authentication import account_activation_token, send_activation_email\nfrom accounts.forms import ProfileEditForm, UpdateProfileImage, UserRegistrationForm\nfrom accounts.models import Profile\n\n\nclass RegisterView(FormView):\n \"\"\"\n A form view that handles user registration.\n \"\"\"\n\n template_name = \"accounts/register/register.html\"\n form_class = UserRegistrationForm\n success_url = \"/\"\n\n def _create_user(self, form):\n username = form.cleaned_data[\"username\"]\n password = form.cleaned_data[\"password\"]\n email = form.cleaned_data[\"email\"]\n user = get_user_model().objects.create_user(username, email, password)\n return user\n\n def _send_email(self, user):\n domain = get_current_site(self.request).domain\n send_activation_email(user, domain)\n\n def _login(self, user):\n login(self.request, user)\n\n def 
form_valid(self, form):\n user = self._create_user(form)\n\n self._send_email(user)\n self._login(user)\n\n return super(RegisterView, self).form_valid(form)\n\n\nclass PasswordResetView(auth_views.PasswordResetView):\n template_name = \"accounts/users/password_reset.html\"\n email_template_name = \"accounts/users/password_reset_email.html\"\n subject_template_name = \"accounts/users/password_reset_subject.txt\"\n from_email = settings.EMAIL_HOST_USER\n success_url = reverse_lazy(\"accounts_password_reset_done\")\n\n\nclass PasswordResetDoneView(auth_views.PasswordResetDoneView):\n template_name = \"accounts/users/password_reset_done.html\"\n\n\nclass PasswordResetConfirmView(auth_views.PasswordResetConfirmView):\n template_name = \"accounts/users/password_reset_confirm.html\"\n success_url = reverse_lazy(\"accounts_password_reset_complete\")\n\n\nclass PasswordResetCompleteView(auth_views.PasswordResetCompleteView):\n template_name = \"accounts/users/password_reset_complete.html\"\n\n\nclass SettingsView(LoginRequiredMixin, UpdateView):\n \"\"\"A form view to edit Profile\"\"\"\n\n login_url = \"accounts_login\"\n form_class = ProfileEditForm\n success_url = reverse_lazy(\"accounts_settings\")\n template_name = \"accounts/utils/update_settings.html\"\n\n def get_object(self, queryset=None):\n return Profile.objects.get(user=self.request.user)\n\n def get_initial(self):\n profile = Profile.objects.get(user=self.request.user)\n self.initial.update(\n {\n \"username\": profile.user.username,\n \"email\": profile.user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n }\n )\n return super(SettingsView, self).get_initial()\n\n\nclass ProfileActivationView(View):\n \"\"\"\n This shows different views to the user when they are verifying\n their account based on whether they are already verified or not.\n \"\"\"\n\n def get(self, request, uidb64, token):\n\n User = get_user_model()\n try:\n uid = force_str(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n profile = Profile.objects.get(user=user)\n if profile.is_verified:\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Already Verified\",\n \"content\": \"You have already verified your email\",\n \"link\": redirect_link,\n }\n return TemplateResponse(request, \"general-message.html\", template_var)\n else:\n profile.is_verified = True\n profile.save()\n\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Successful\",\n \"content\": \"Thank you for verifying your email with CiviWiki\",\n \"link\": redirect_link,\n }\n return TemplateResponse(request, \"general-message.html\", template_var)\n else:\n # invalid link\n redirect_link = {\"href\": \"/\", \"label\": \"Back to Main\"}\n template_var = {\n \"title\": \"Email Verification Error\",\n \"content\": \"Email could not be verified\",\n \"link\": redirect_link,\n }\n return TemplateResponse(request, \"general-message.html\", template_var)\n\n\nclass ProfileSetupView(LoginRequiredMixin, View):\n \"\"\"A view to make the user profile full_profile\"\"\"\n\n login_url = \"accounts_login\"\n\n def get(self, request):\n profile = Profile.objects.get(user=request.user)\n if profile.full_profile:\n return 
HttpResponseRedirect(\"/\")\n # start temp rep rendering TODO: REMOVE THIS\n else:\n data = {\n \"username\": request.user.username,\n \"email\": request.user.email,\n }\n return TemplateResponse(request, \"accounts/user-setup.html\", data)\n\n\n@login_required\n@full_profile\ndef user_profile(request, username=None):\n User = get_user_model()\n if request.method == \"GET\":\n if not username:\n return HttpResponseRedirect(f\"/profile/{request.user}\")\n else:\n is_owner = username == request.user.username\n try:\n user = User.objects.get(username=username)\n profile = user.profile_set.first()\n except User.DoesNotExist:\n return HttpResponseRedirect(\"/404\")\n\n form = ProfileEditForm(\n initial={\n \"username\": user.username,\n \"email\": user.email,\n \"first_name\": profile.first_name or None,\n \"last_name\": profile.last_name or None,\n \"about_me\": profile.about_me or None,\n },\n readonly=True,\n )\n data = {\n \"username\": user,\n \"profile_image_form\": UpdateProfileImage,\n \"form\": form if is_owner else None,\n \"readonly\": True,\n }\n return TemplateResponse(request, \"account.html\", data)\n", "path": "project/accounts/views.py"}]} | 2,502 | 326 |
gh_patches_debug_64991 | rasdani/github-patches | git_diff | conda-forge__conda-smithy-864 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Autogenerated README.md missing final newline
## The Problem
As I've confirmed is the case on multiple repos here, including our own ``spyder-feedstock`` and ``spyder-kernels-feedstock`` as well as two arbitrary conda-forge repos I checked, the last line in README.md lacks a terminating newline (LF/``0x0A``), and is thus ill-formed. I'd be happy to submit a PR to fix it since I imagine it is probably pretty trivial, if someone more knowledgeable than me can let me know how to approach it.
## Proposed Solutions
A naive hack would seem to be just writing an additional ``\n`` [here](https://github.com/conda-forge/conda-smithy/blob/855f23bb96efb1cbdbdc5e60dfb9bbdd3e142d31/conda_smithy/configure_feedstock.py#L718), but editing the [template ](https://github.com/conda-forge/conda-smithy/blob/master/conda_smithy/templates/README.md.tmpl) would seem to make far more sense. However, the template *has* a trailing newline and hasn't been edited in a while, so not sure what's going on—is it not writing the last one; is it getting stripped, or what?
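As a sketch of that naive hack (purely illustrative; the helper name is mine and this is not the patch that was merged), the write step could normalize the trailing newline itself:
```python
def write_with_trailing_newline(path, rendered_text):
    """Write rendered template output, guaranteeing the file ends in exactly one LF."""
    with open(path, "w") as fh:
        fh.write(rendered_text.rstrip("\n") + "\n")

# e.g. write_with_trailing_newline("README.md", rendered_readme_content)
```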
Thanks!
</issue>
<code>
[start of conda_smithy/vendored/__init__.py]
[end of conda_smithy/vendored/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/conda_smithy/vendored/__init__.py b/conda_smithy/vendored/__init__.py
--- a/conda_smithy/vendored/__init__.py
+++ b/conda_smithy/vendored/__init__.py
@@ -0,0 +1 @@
+
| {"golden_diff": "diff --git a/conda_smithy/vendored/__init__.py b/conda_smithy/vendored/__init__.py\n--- a/conda_smithy/vendored/__init__.py\n+++ b/conda_smithy/vendored/__init__.py\n@@ -0,0 +1 @@\n+\n", "issue": "Autogenerated README.md missing final newline\n## The Problem\r\n\r\nAs I've confirmed is the case on multiple repos here, including our own ``spyder-feedstock`` and ``spyder-kernels-feedstock`` as well as two arbitrary conda-forge repos I checked conda-forge, the last line in README.md lacks a terminating newline (LF/``x0D``), and is thus ill-formed. I'd be happy to submit a PR to fix it since I imagine it is probably pretty trivial, if someone more knowlegable than me can let me know how to approach it. \r\n\r\n## Proposed Solutions\r\n\r\nA naive hack would seem to be just writing an additional ``\\n`` [here](https://github.com/conda-forge/conda-smithy/blob/855f23bb96efb1cbdbdc5e60dfb9bbdd3e142d31/conda_smithy/configure_feedstock.py#L718), but editing the [template ](https://github.com/conda-forge/conda-smithy/blob/master/conda_smithy/templates/README.md.tmpl) would seem to make far more sense. However, the template *has* a trailing newline and hasn't been edited in a while, so not sure what's going on\u2014is it not writing the last one; is it getting stripped, or what?\r\n\r\nThanks!\n", "before_files": [{"content": "", "path": "conda_smithy/vendored/__init__.py"}]} | 842 | 69 |
gh_patches_debug_32138 | rasdani/github-patches | git_diff | bokeh__bokeh-4812 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
MultiSelect not rendering correctly if `options` is `list(dict)`
From the sources of the MultiSelect widget I see the possibility to provide `options` as:
`options = List(Either(String, Dict(String, String)))`
But the HTML-output looks wrong:
SelectBox with `[object Object]`
Example showing the issue:
```
from bokeh.models.widgets import MultiSelect
from bokeh.io import show
from bokeh.layouts import widgetbox
multi_select = MultiSelect(title="Option Works:", options=['a', 'b', 'c'])
multi_select_wrong = MultiSelect(title="Option Wrong:", options=[{'name': 'a', 'value': 'a'}, {'name': 'b', 'value': 'b'}, {'name': 'c', 'value': 'c'}])
show(widgetbox(multi_select, multi_select_wrong))
```
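For reference, the change that eventually landed (see the diff further down in this entry) drops the dict form in favour of `(value, label)` tuples, so the second widget can be spelled like this (labels are illustrative):
```python
from bokeh.models.widgets import MultiSelect

# (value, label) tuples instead of {'name': ..., 'value': ...} dicts
multi_select_tuples = MultiSelect(title="Option Tuples:",
                                  options=[('a', 'A'), ('b', 'B'), ('c', 'C')])
```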
My environment:
```
In [11]: bokeh.__version__
Out[11]: '0.12.1dev2'
```
the `version.js` has `version = '0.12.0'` (perhaps the revision was not updated in the dev-build)

Thanks for bokeh, it's an incredible tool.
</issue>
<code>
[start of bokeh/models/widgets/inputs.py]
1 """ Various kinds of input widgets and form controls.
2
3 """
4 from __future__ import absolute_import
5
6 import six
7
8 from ...core.properties import abstract
9 from ...core.properties import Bool, Int, Float, String, Date, RelativeDelta, Enum, List, Dict, Tuple, Either, Instance
10 from ..callbacks import Callback
11 from .widget import Widget
12 from ...core.enums import SliderCallbackPolicy
13
14 @abstract
15 class InputWidget(Widget):
16 """ Abstract base class for input widgets. `InputWidget`` is not
17 generally useful to instantiate on its own.
18
19 """
20
21 title = String(default="", help="""
22 Widget's label.
23 """)
24
25 @classmethod
26 def coerce_value(cls, val):
27 prop_obj = cls.lookup('value')
28 if isinstance(prop_obj, Float):
29 return float(val)
30 elif isinstance(prop_obj, Int):
31 return int(val)
32 elif isinstance(prop_obj, String):
33 return str(val)
34 else:
35 return val
36
37 @classmethod
38 def create(cls, *args, **kwargs):
39 """ Only called the first time we make an object,
40 whereas __init__ is called every time it's loaded
41
42 """
43 if kwargs.get('title') is None:
44 kwargs['title'] = kwargs['name']
45 if kwargs.get('value') is not None:
46 kwargs['value'] = cls.coerce_value(kwargs.get('value'))
47 return cls(**kwargs)
48
49 class TextInput(InputWidget):
50 """ Single-line input widget. """
51
52 value = String(default="", help="""
53 Initial or entered text value.
54 """)
55
56 callback = Instance(Callback, help="""
57 A callback to run in the browser whenever the user unfocuses the TextInput
58 widget by hitting Enter or clicking outside of the text box area.
59 """)
60
61
62 class AutocompleteInput(TextInput):
63 """ Single-line input widget with auto-completion. """
64
65 completions = List(String, help="""
66 A list of completion strings. This will be used to guide the
67 user upon typing the beginning of a desired value.
68 """)
69
70
71 class Select(InputWidget):
72 """ Single-select widget.
73
74 """
75
76 options = List(Either(String, Dict(String, String)), help="""
77 Available selection options.
78 """)
79
80 value = String(default="", help="""
81 Initial or selected value.
82 """)
83
84 callback = Instance(Callback, help="""
85 A callback to run in the browser whenever the current Select dropdown
86 value changes.
87 """)
88
89 @classmethod
90 def create(self, *args, **kwargs):
91 options = kwargs.pop('options', [])
92 new_options = []
93 for opt in options:
94 if isinstance(opt, six.string_types):
95 opt = {'name' : opt, 'value' : opt}
96 new_options.append(opt)
97 kwargs['options'] = new_options
98 return super(Select, self).create(*args, **kwargs)
99
100 class MultiSelect(InputWidget):
101 """ Multi-select widget.
102
103 """
104
105 options = List(Either(String, Dict(String, String)), help="""
106 Available selection options.
107 """)
108
109 value = List(String, help="""
110 Initial or selected values.
111 """)
112
113 callback = Instance(Callback, help="""
114 A callback to run in the browser whenever the current dropdown value
115 changes.
116 """)
117
118 @classmethod
119 def create(self, *args, **kwargs):
120 options = kwargs.pop('options', [])
121 new_options = []
122 for opt in options:
123 if isinstance(opt, six.string_types):
124 opt = {'name' : opt, 'value' : opt}
125 new_options.append(opt)
126 kwargs['options'] = new_options
127 return super(Select, self).create(*args, **kwargs)
128
129 class Slider(InputWidget):
130 """ Slider-based number selection widget.
131
132 """
133
134 value = Float(default=0.5, help="""
135 Initial or selected value.
136 """)
137
138 start = Float(default=0, help="""
139 The minimum allowable value.
140 """)
141
142 end = Float(default=1, help="""
143 The maximum allowable value.
144 """)
145
146 step = Float(default=0.1, help="""
147 The step between consecutive values.
148 """)
149
150 orientation = Enum("horizontal", "vertical", help="""
151 Orient the slider either horizontally (default) or vertically.
152 """)
153
154 callback = Instance(Callback, help="""
155 A callback to run in the browser whenever the current Slider value changes.
156 """)
157
158 callback_throttle = Float(default=200, help="""
159 Number of microseconds to pause between callback calls as the slider is moved.
160 """)
161
162 callback_policy = Enum(SliderCallbackPolicy, default="throttle", help="""
163 When the callback is initiated. This parameter can take on only one of three options:
164
165 "continuous": the callback will be executed immediately for each movement of the slider
166 "throttle": the callback will be executed at most every ``callback_throttle`` milliseconds.
167 "mouseup": the callback will be executed only once when the slider is released.
168
169 The `mouseup` policy is intended for scenarios in which the callback is expensive in time.
170 """)
171
172 class DateRangeSlider(InputWidget):
173 """ Slider-based date range selection widget.
174
175 """
176
177 value = Tuple(Date, Date, help="""
178 The initial or selected date range.
179 """)
180
181 bounds = Tuple(Date, Date, help="""
182 The earliest and latest allowable dates.
183 """)
184
185 range = Tuple(RelativeDelta, RelativeDelta, help="""
186 [TDB]
187 """)
188
189 step = RelativeDelta(help="""
190 The step between consecutive dates.
191 """)
192
193 # formatter = Either(String, Function(Date))
194 # scales = DateRangeSliderScales ... # first, next, stop, label, format
195
196 enabled = Bool(True, help="""
197 Enable or disable this widget.
198 """)
199
200 arrows = Bool(True, help="""
201 Whether to show clickable arrows on both ends of the slider.
202 """)
203
204 value_labels = Enum("show", "hide", "change", help="""
205 Show or hide value labels on both sides of the slider.
206 """)
207
208 wheel_mode = Enum("scroll", "zoom", default=None, help="""
209 Whether mouse zoom should scroll or zoom selected range (or
210 do nothing).
211 """)
212
213 callback = Instance(Callback, help="""
214 A callback to run in the browser whenever either slider's value changes.
215 """)
216
217 class DatePicker(InputWidget):
218 """ Calendar-based date picker widget.
219
220 """
221
222 value = Date(help="""
223 The initial or picked date.
224 """)
225
226 min_date = Date(default=None, help="""
227 Optional earliest allowable date.
228 """)
229
230 max_date = Date(default=None, help="""
231 Optional latest allowable date.
232 """)
233
234 callback = Instance(Callback, help="""
235 A callback to run in the browser whenever the current date value changes.
236 """)
237
[end of bokeh/models/widgets/inputs.py]
[start of sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py]
1 from bokeh.io import output_file, show
2 from bokeh.layouts import widgetbox
3 from bokeh.models.widgets import MultiSelect
4
5 output_file("multi_select.html")
6
7 multi_select = MultiSelect(title="Option:", value=["foo", "quux"],
8 options=["foo", "bar", "baz", "quux"])
9
10 show(widgetbox(multi_select))
11
[end of sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bokeh/models/widgets/inputs.py b/bokeh/models/widgets/inputs.py
--- a/bokeh/models/widgets/inputs.py
+++ b/bokeh/models/widgets/inputs.py
@@ -6,7 +6,7 @@
import six
from ...core.properties import abstract
-from ...core.properties import Bool, Int, Float, String, Date, RelativeDelta, Enum, List, Dict, Tuple, Either, Instance
+from ...core.properties import Bool, Int, Float, String, Date, RelativeDelta, Enum, List, Tuple, Either, Instance
from ..callbacks import Callback
from .widget import Widget
from ...core.enums import SliderCallbackPolicy
@@ -73,8 +73,11 @@
"""
- options = List(Either(String, Dict(String, String)), help="""
- Available selection options.
+ options = List(Either(String, Tuple(String, String)), help="""
+ Available selection options. Options may be provided either as a list of
+ possible string values, or as a list of tuples, each of the form
+ ``(value, label)``. In the latter case, the visible widget text for each
+ value will be corresponding given label.
""")
value = String(default="", help="""
@@ -102,8 +105,11 @@
"""
- options = List(Either(String, Dict(String, String)), help="""
- Available selection options.
+ options = List(Either(String, Tuple(String, String)), help="""
+ Available selection options. Options may be provided either as a list of
+ possible string values, or as a list of tuples, each of the form
+ ``(value, label)``. In the latter case, the visible widget text for each
+ value will be corresponding given label.
""")
value = List(String, help="""
diff --git a/sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py b/sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py
--- a/sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py
+++ b/sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py
@@ -5,6 +5,6 @@
output_file("multi_select.html")
multi_select = MultiSelect(title="Option:", value=["foo", "quux"],
- options=["foo", "bar", "baz", "quux"])
+ options=[("foo", "Foo"), ("bar", "BAR"), ("baz", "bAz"), ("quux", "quux")])
show(widgetbox(multi_select))
| {"golden_diff": "diff --git a/bokeh/models/widgets/inputs.py b/bokeh/models/widgets/inputs.py\n--- a/bokeh/models/widgets/inputs.py\n+++ b/bokeh/models/widgets/inputs.py\n@@ -6,7 +6,7 @@\n import six\n \n from ...core.properties import abstract\n-from ...core.properties import Bool, Int, Float, String, Date, RelativeDelta, Enum, List, Dict, Tuple, Either, Instance\n+from ...core.properties import Bool, Int, Float, String, Date, RelativeDelta, Enum, List, Tuple, Either, Instance\n from ..callbacks import Callback\n from .widget import Widget\n from ...core.enums import SliderCallbackPolicy\n@@ -73,8 +73,11 @@\n \n \"\"\"\n \n- options = List(Either(String, Dict(String, String)), help=\"\"\"\n- Available selection options.\n+ options = List(Either(String, Tuple(String, String)), help=\"\"\"\n+ Available selection options. Options may be provided either as a list of\n+ possible string values, or as a list of tuples, each of the form\n+ ``(value, label)``. In the latter case, the visible widget text for each\n+ value will be corresponding given label.\n \"\"\")\n \n value = String(default=\"\", help=\"\"\"\n@@ -102,8 +105,11 @@\n \n \"\"\"\n \n- options = List(Either(String, Dict(String, String)), help=\"\"\"\n- Available selection options.\n+ options = List(Either(String, Tuple(String, String)), help=\"\"\"\n+ Available selection options. Options may be provided either as a list of\n+ possible string values, or as a list of tuples, each of the form\n+ ``(value, label)``. In the latter case, the visible widget text for each\n+ value will be corresponding given label.\n \"\"\")\n \n value = List(String, help=\"\"\"\ndiff --git a/sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py b/sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py\n--- a/sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py\n+++ b/sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py\n@@ -5,6 +5,6 @@\n output_file(\"multi_select.html\")\n \n multi_select = MultiSelect(title=\"Option:\", value=[\"foo\", \"quux\"],\n- options=[\"foo\", \"bar\", \"baz\", \"quux\"])\n+ options=[(\"foo\", \"Foo\"), (\"bar\", \"BAR\"), (\"baz\", \"bAz\"), (\"quux\", \"quux\")])\n \n show(widgetbox(multi_select))\n", "issue": "MultiSelect not rendering correctly if `options` is `list(dict)` \nFrom the sources of MultiSelect widget I see the possibility to provide `options` as:\n`options = List(Either(String, Dict(String, String))`\n\nBut the HTML-output looks wrong:\nSelectBox with `[object Object]`\n\nExample showing the issue:\n\n```\nfrom bokeh.models.widgets import MultiSelect\nfrom bokeh.io import show\nfrom bokeh.layouts import widgetbox\nmulti_select = MultiSelect(title=\"Option Works:\", options=['a', 'b', 'c'])\nmulti_select_wrong = MultiSelect(title=\"Option Wrong:\", options=[{'name': 'a', 'value': 'a'}, {'name': 'b', 'value': 'b'}, {'name': 'c', 'value': 'c'}])\nshow(widgetbox(multi_select, multi_select_wrong))\n```\n\nMy environment:\n\n```\nIn [11]: bokeh.__version__\nOut[11]: '0.12.1dev2'\n```\n\nthe `version.js` has `version = '0.12.0'` (perhaps the revision was not updated in the dev-build)\n\n\n\nThanks for bokeh, it's an incredible tool.\n\n", "before_files": [{"content": "\"\"\" Various kinds of input widgets and form controls.\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport six\n\nfrom ...core.properties import abstract\nfrom ...core.properties import Bool, Int, Float, String, Date, RelativeDelta, Enum, List, Dict, Tuple, Either, 
Instance\nfrom ..callbacks import Callback\nfrom .widget import Widget\nfrom ...core.enums import SliderCallbackPolicy\n\n@abstract\nclass InputWidget(Widget):\n \"\"\" Abstract base class for input widgets. `InputWidget`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n title = String(default=\"\", help=\"\"\"\n Widget's label.\n \"\"\")\n\n @classmethod\n def coerce_value(cls, val):\n prop_obj = cls.lookup('value')\n if isinstance(prop_obj, Float):\n return float(val)\n elif isinstance(prop_obj, Int):\n return int(val)\n elif isinstance(prop_obj, String):\n return str(val)\n else:\n return val\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\" Only called the first time we make an object,\n whereas __init__ is called every time it's loaded\n\n \"\"\"\n if kwargs.get('title') is None:\n kwargs['title'] = kwargs['name']\n if kwargs.get('value') is not None:\n kwargs['value'] = cls.coerce_value(kwargs.get('value'))\n return cls(**kwargs)\n\nclass TextInput(InputWidget):\n \"\"\" Single-line input widget. \"\"\"\n\n value = String(default=\"\", help=\"\"\"\n Initial or entered text value.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the user unfocuses the TextInput\n widget by hitting Enter or clicking outside of the text box area.\n \"\"\")\n\n\nclass AutocompleteInput(TextInput):\n \"\"\" Single-line input widget with auto-completion. \"\"\"\n\n completions = List(String, help=\"\"\"\n A list of completion strings. This will be used to guide the\n user upon typing the beginning of a desired value.\n \"\"\")\n\n\nclass Select(InputWidget):\n \"\"\" Single-select widget.\n\n \"\"\"\n\n options = List(Either(String, Dict(String, String)), help=\"\"\"\n Available selection options.\n \"\"\")\n\n value = String(default=\"\", help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current Select dropdown\n value changes.\n \"\"\")\n\n @classmethod\n def create(self, *args, **kwargs):\n options = kwargs.pop('options', [])\n new_options = []\n for opt in options:\n if isinstance(opt, six.string_types):\n opt = {'name' : opt, 'value' : opt}\n new_options.append(opt)\n kwargs['options'] = new_options\n return super(Select, self).create(*args, **kwargs)\n\nclass MultiSelect(InputWidget):\n \"\"\" Multi-select widget.\n\n \"\"\"\n\n options = List(Either(String, Dict(String, String)), help=\"\"\"\n Available selection options.\n \"\"\")\n\n value = List(String, help=\"\"\"\n Initial or selected values.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current dropdown value\n changes.\n \"\"\")\n\n @classmethod\n def create(self, *args, **kwargs):\n options = kwargs.pop('options', [])\n new_options = []\n for opt in options:\n if isinstance(opt, six.string_types):\n opt = {'name' : opt, 'value' : opt}\n new_options.append(opt)\n kwargs['options'] = new_options\n return super(Select, self).create(*args, **kwargs)\n\nclass Slider(InputWidget):\n \"\"\" Slider-based number selection widget.\n\n \"\"\"\n\n value = Float(default=0.5, help=\"\"\"\n Initial or selected value.\n \"\"\")\n\n start = Float(default=0, help=\"\"\"\n The minimum allowable value.\n \"\"\")\n\n end = Float(default=1, help=\"\"\"\n The maximum allowable value.\n \"\"\")\n\n step = Float(default=0.1, help=\"\"\"\n The step between consecutive values.\n \"\"\")\n\n orientation = Enum(\"horizontal\", 
\"vertical\", help=\"\"\"\n Orient the slider either horizontally (default) or vertically.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current Slider value changes.\n \"\"\")\n\n callback_throttle = Float(default=200, help=\"\"\"\n Number of microseconds to pause between callback calls as the slider is moved.\n \"\"\")\n\n callback_policy = Enum(SliderCallbackPolicy, default=\"throttle\", help=\"\"\"\n When the callback is initiated. This parameter can take on only one of three options:\n\n \"continuous\": the callback will be executed immediately for each movement of the slider\n \"throttle\": the callback will be executed at most every ``callback_throttle`` milliseconds.\n \"mouseup\": the callback will be executed only once when the slider is released.\n\n The `mouseup` policy is intended for scenarios in which the callback is expensive in time.\n \"\"\")\n\nclass DateRangeSlider(InputWidget):\n \"\"\" Slider-based date range selection widget.\n\n \"\"\"\n\n value = Tuple(Date, Date, help=\"\"\"\n The initial or selected date range.\n \"\"\")\n\n bounds = Tuple(Date, Date, help=\"\"\"\n The earliest and latest allowable dates.\n \"\"\")\n\n range = Tuple(RelativeDelta, RelativeDelta, help=\"\"\"\n [TDB]\n \"\"\")\n\n step = RelativeDelta(help=\"\"\"\n The step between consecutive dates.\n \"\"\")\n\n # formatter = Either(String, Function(Date))\n # scales = DateRangeSliderScales ... # first, next, stop, label, format\n\n enabled = Bool(True, help=\"\"\"\n Enable or disable this widget.\n \"\"\")\n\n arrows = Bool(True, help=\"\"\"\n Whether to show clickable arrows on both ends of the slider.\n \"\"\")\n\n value_labels = Enum(\"show\", \"hide\", \"change\", help=\"\"\"\n Show or hide value labels on both sides of the slider.\n \"\"\")\n\n wheel_mode = Enum(\"scroll\", \"zoom\", default=None, help=\"\"\"\n Whether mouse zoom should scroll or zoom selected range (or\n do nothing).\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever either slider's value changes.\n \"\"\")\n\nclass DatePicker(InputWidget):\n \"\"\" Calendar-based date picker widget.\n\n \"\"\"\n\n value = Date(help=\"\"\"\n The initial or picked date.\n \"\"\")\n\n min_date = Date(default=None, help=\"\"\"\n Optional earliest allowable date.\n \"\"\")\n\n max_date = Date(default=None, help=\"\"\"\n Optional latest allowable date.\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the current date value changes.\n \"\"\")\n", "path": "bokeh/models/widgets/inputs.py"}, {"content": "from bokeh.io import output_file, show\nfrom bokeh.layouts import widgetbox\nfrom bokeh.models.widgets import MultiSelect\n\noutput_file(\"multi_select.html\")\n\nmulti_select = MultiSelect(title=\"Option:\", value=[\"foo\", \"quux\"],\n options=[\"foo\", \"bar\", \"baz\", \"quux\"])\n\nshow(widgetbox(multi_select))\n", "path": "sphinx/source/docs/user_guide/source_examples/interaction_multiselect.py"}]} | 3,090 | 569 |
gh_patches_debug_2591 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-6375 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The s3 action "remove-statements" errors out when it encounters a bucket policy statement without a sid
**Describe the bug**
s3.remove-statements fails when a sid-less bucket policy statement is encountered
You can see the key error in the traceback. Bucket policy statements do not require Sids and S3 omits the key from describeBucketPolicy response when it does not exist.
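For example, a statement like the following (field values are illustrative) is perfectly valid yet carries no "Sid" key at all once the policy JSON has been parsed into Python dicts:
```python
# Illustrative sid-less statement of the kind remove-statements iterates over
statement = {
    "Effect": "Deny",
    "Principal": "*",
    "Action": "s3:*",
    "Resource": "arn:aws:s3:::example-bucket/*",
    "Condition": {"Bool": {"aws:SecureTransport": "false"}},
}
```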
**To Reproduce**
Attempt to use remove-statements to remove a statement from a bucket with a sid-less statement (one example of which is the "aws-sam-cli-managed-default-samclisourcebucket-..." buckets created by AWS SAM CLI.)
**Expected behavior**
I expected the statement that does not contain a Sid to be treated as non-matching and skipped.
**Background (please complete the following information):**
- OS: AWS Lambda
- Python Version: Python 3.8
- Custodian Version: 0.9.8
- Tool Version: n/a
- Cloud Provider: AWS
- Policy: [please exclude any account/sensitive information]
```json
{
"statement_ids": [
"denyAccessToBucket"
],
"type": "remove-statements"
}
```
- Traceback: [if applicable, please exclude sensitive/account information]
[ERROR] KeyError: 'Sid'
Traceback (most recent call last):
File "/var/task/custodian_policy.py", line 4, in run
return handler.dispatch_event(event, context)
File "/var/task/c7n/handler.py", line 165, in dispatch_event
p.push(event, context)
File "/var/task/c7n/policy.py", line 1140, in push
return mode.run(event, lambda_ctx)
File "/var/task/c7n/policy.py", line 853, in run
resources = super(ConfigRuleMode, self).run(event, lambda_context)
File "/var/task/c7n/policy.py", line 453, in run
return self.run_resource_set(event, resources)
File "/var/task/c7n/policy.py", line 483, in run_resource_set
results = action.process(resources)
File "/var/task/c7n/resources/s3.py", line 1272, in process
results += filter(None, [f.result()])
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 432, in result
return self.__get_result()
File "/var/lang/lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File "/var/lang/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
File "/var/task/c7n/resources/s3.py", line 1282, in process_bucket
statements, found = self.process_policy(
File "/var/task/c7n/actions/policy.py", line 21, in process_policy
return remove_statements(
File "/var/task/c7n/actions/policy.py", line 37, in remove_statements
elif s['Sid'] in match_ids:
- `custodian version --debug` output: n/a
**Additional context**
Add any other context about the problem here.
</issue>
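As a quick illustration of the failure mode described in the issue above, the following stand-alone sketch (not Custodian code; the statement contents are invented) shows why a bare `s['Sid']` lookup raises `KeyError` on a Sid-less statement, while a membership-guarded lookup simply treats it as non-matching:

```python
# Minimal illustration of the KeyError described above; not Custodian code.
match_ids = ["denyAccessToBucket"]
statements = [
    {"Sid": "denyAccessToBucket", "Effect": "Deny", "Principal": "*"},
    {"Effect": "Allow", "Principal": "*"},  # Sid-less statement, which S3 allows
]

try:
    matched = [s for s in statements if s["Sid"] in match_ids]
except KeyError as exc:
    print("bare lookup fails:", exc)  # KeyError: 'Sid'

# Guarding on key presence skips Sid-less statements instead of crashing.
matched = [s for s in statements if "Sid" in s and s["Sid"] in match_ids]
print(matched)  # only the statement whose Sid is listed in match_ids
```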
<code>
[start of c7n/actions/policy.py]
1 # Copyright The Cloud Custodian Authors.
2 # SPDX-License-Identifier: Apache-2.0
3
4 from .core import BaseAction
5 from c7n import utils
6
7
8 class RemovePolicyBase(BaseAction):
9
10 schema = utils.type_schema(
11 'remove-statements',
12 required=['statement_ids'],
13 statement_ids={'oneOf': [
14 {'enum': ['matched', "*"]},
15 {'type': 'array', 'items': {'type': 'string'}}]})
16
17 def process_policy(self, policy, resource, matched_key):
18 statements = policy.get('Statement', [])
19 resource_statements = resource.get(matched_key, ())
20
21 return remove_statements(
22 self.data['statement_ids'], statements, resource_statements)
23
24
25 def remove_statements(match_ids, statements, matched=()):
26 found = []
27 for s in list(statements):
28 s_found = False
29 if match_ids == '*':
30 s_found = True
31 elif match_ids == 'matched':
32 if s in matched:
33 s_found = True
34 elif s['Sid'] in match_ids:
35 s_found = True
36 if s_found:
37 found.append(s)
38 statements.remove(s)
39 if not found:
40 return None, found
41 return statements, found
42
43
44 class ModifyPolicyBase(BaseAction):
45 """Action to modify resource IAM policy statements.
46
47 Applies to all resources with embedded IAM Policies.
48
49 :example:
50
51 .. code-block:: yaml
52
53 policies:
54 - name: sns-yank-cross-account
55 resource: sns
56 filters:
57 - type: cross-account
58 actions:
59 - type: modify-policy
60 add-statements: [{
61 "Sid": "ReplaceWithMe",
62 "Effect": "Allow",
63 "Principal": "*",
64 "Action": ["SNS:GetTopicAttributes"],
65 "Resource": topic_arn,
66 }]
67 remove-statements: '*'
68 """
69
70 schema_alias = True
71 schema = utils.type_schema(
72 'modify-policy',
73 **{
74 'add-statements': {
75 'type': 'array',
76 'items': {'$ref': '#/definitions/iam-statement'},
77 },
78 'remove-statements': {
79 'type': ['array', 'string'],
80 'oneOf': [
81 {'enum': ['matched', '*']},
82 {'type': 'array', 'items': {'type': 'string'}}
83 ],
84 }
85 }
86 )
87
88 def __init__(self, data=None, manager=None):
89 if manager is not None:
90 config_args = {
91 'account_id': manager.config.account_id,
92 'region': manager.config.region
93 }
94 self.data = utils.format_string_values(data, **config_args)
95 else:
96 self.data = utils.format_string_values(data)
97 self.manager = manager
98
99 def add_statements(self, policy_statements):
100 current = {s['Sid']: s for s in policy_statements}
101 additional = {s['Sid']: s for s in self.data.get('add-statements', [])}
102 current.update(additional)
103 return list(current.values()), bool(additional)
104
105 def remove_statements(self, policy_statements, resource, matched_key):
106 statement_ids = self.data.get('remove-statements', [])
107 found = []
108 if len(statement_ids) == 0:
109 return policy_statements, found
110 resource_statements = resource.get(matched_key, ())
111 return remove_statements(
112 statement_ids, policy_statements, resource_statements)
113
[end of c7n/actions/policy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/c7n/actions/policy.py b/c7n/actions/policy.py
--- a/c7n/actions/policy.py
+++ b/c7n/actions/policy.py
@@ -31,7 +31,7 @@
elif match_ids == 'matched':
if s in matched:
s_found = True
- elif s['Sid'] in match_ids:
+ elif 'Sid' in s and s['Sid'] in match_ids:
s_found = True
if s_found:
found.append(s)
| {"golden_diff": "diff --git a/c7n/actions/policy.py b/c7n/actions/policy.py\n--- a/c7n/actions/policy.py\n+++ b/c7n/actions/policy.py\n@@ -31,7 +31,7 @@\n elif match_ids == 'matched':\n if s in matched:\n s_found = True\n- elif s['Sid'] in match_ids:\n+ elif 'Sid' in s and s['Sid'] in match_ids:\n s_found = True\n if s_found:\n found.append(s)\n", "issue": "The s3 action \"remove-statements\" errors out when it encounters a bucket policy statement without a sid\n**Describe the bug**\r\ns3.remove-statements fails when a sid-less bucket policy statement is encountered\r\n\r\nYou can see the key error in the traceback. Bucket policy statements do not require Sids and S3 omits the key from describeBucketPolicy response when it does not exist.\r\n\r\n**To Reproduce**\r\nAttempt to use remove-statements to remove a statement from a bucket with a sid-less statement (one example of which is the \"aws-sam-cli-managed-default-samclisourcebucket-...\" buckets created by AWS SAM CLI.)\r\n\r\n**Expected behavior**\r\nI expected the statement which does not contain a SID to be iterated over as non-matching.\r\n\r\n**Background (please complete the following information):**\r\n - OS: AWS Lambda\r\n - Python Version: Python 3.8\r\n - Custodian Version: 0.9.8\r\n - Tool Version: n/a\r\n - Cloud Provider: AWS\r\n - Policy: [please exclude any account/sensitive information]\r\n```json\r\n {\r\n \"statement_ids\": [\r\n \"denyAccessToBucket\"\r\n ],\r\n \"type\": \"remove-statements\"\r\n }\r\n```\r\n - Traceback: [if applicable, please exclude sensitive/account information]\r\n [ERROR] KeyError: 'Sid'\r\nTraceback (most recent call last):\r\n File \"/var/task/custodian_policy.py\", line 4, in run\r\n return handler.dispatch_event(event, context)\r\n File \"/var/task/c7n/handler.py\", line 165, in dispatch_event\r\n p.push(event, context)\r\n File \"/var/task/c7n/policy.py\", line 1140, in push\r\n return mode.run(event, lambda_ctx)\r\n File \"/var/task/c7n/policy.py\", line 853, in run\r\n resources = super(ConfigRuleMode, self).run(event, lambda_context)\r\n File \"/var/task/c7n/policy.py\", line 453, in run\r\n return self.run_resource_set(event, resources)\r\n File \"/var/task/c7n/policy.py\", line 483, in run_resource_set\r\n results = action.process(resources)\r\n File \"/var/task/c7n/resources/s3.py\", line 1272, in process\r\n results += filter(None, [f.result()])\r\n File \"/var/lang/lib/python3.8/concurrent/futures/_base.py\", line 432, in result\r\n return self.__get_result()\r\n File \"/var/lang/lib/python3.8/concurrent/futures/_base.py\", line 388, in __get_result\r\n raise self._exception\r\n File \"/var/lang/lib/python3.8/concurrent/futures/thread.py\", line 57, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File \"/var/task/c7n/resources/s3.py\", line 1282, in process_bucket\r\n statements, found = self.process_policy(\r\n File \"/var/task/c7n/actions/policy.py\", line 21, in process_policy\r\n return remove_statements(\r\n File \"/var/task/c7n/actions/policy.py\", line 37, in remove_statements\r\n elif s['Sid'] in match_ids:\r\n - `custodian version --debug` output: n/a\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom .core import BaseAction\nfrom c7n import utils\n\n\nclass RemovePolicyBase(BaseAction):\n\n schema = utils.type_schema(\n 'remove-statements',\n required=['statement_ids'],\n statement_ids={'oneOf': [\n {'enum': 
['matched', \"*\"]},\n {'type': 'array', 'items': {'type': 'string'}}]})\n\n def process_policy(self, policy, resource, matched_key):\n statements = policy.get('Statement', [])\n resource_statements = resource.get(matched_key, ())\n\n return remove_statements(\n self.data['statement_ids'], statements, resource_statements)\n\n\ndef remove_statements(match_ids, statements, matched=()):\n found = []\n for s in list(statements):\n s_found = False\n if match_ids == '*':\n s_found = True\n elif match_ids == 'matched':\n if s in matched:\n s_found = True\n elif s['Sid'] in match_ids:\n s_found = True\n if s_found:\n found.append(s)\n statements.remove(s)\n if not found:\n return None, found\n return statements, found\n\n\nclass ModifyPolicyBase(BaseAction):\n \"\"\"Action to modify resource IAM policy statements.\n\n Applies to all resources with embedded IAM Policies.\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: sns-yank-cross-account\n resource: sns\n filters:\n - type: cross-account\n actions:\n - type: modify-policy\n add-statements: [{\n \"Sid\": \"ReplaceWithMe\",\n \"Effect\": \"Allow\",\n \"Principal\": \"*\",\n \"Action\": [\"SNS:GetTopicAttributes\"],\n \"Resource\": topic_arn,\n }]\n remove-statements: '*'\n \"\"\"\n\n schema_alias = True\n schema = utils.type_schema(\n 'modify-policy',\n **{\n 'add-statements': {\n 'type': 'array',\n 'items': {'$ref': '#/definitions/iam-statement'},\n },\n 'remove-statements': {\n 'type': ['array', 'string'],\n 'oneOf': [\n {'enum': ['matched', '*']},\n {'type': 'array', 'items': {'type': 'string'}}\n ],\n }\n }\n )\n\n def __init__(self, data=None, manager=None):\n if manager is not None:\n config_args = {\n 'account_id': manager.config.account_id,\n 'region': manager.config.region\n }\n self.data = utils.format_string_values(data, **config_args)\n else:\n self.data = utils.format_string_values(data)\n self.manager = manager\n\n def add_statements(self, policy_statements):\n current = {s['Sid']: s for s in policy_statements}\n additional = {s['Sid']: s for s in self.data.get('add-statements', [])}\n current.update(additional)\n return list(current.values()), bool(additional)\n\n def remove_statements(self, policy_statements, resource, matched_key):\n statement_ids = self.data.get('remove-statements', [])\n found = []\n if len(statement_ids) == 0:\n return policy_statements, found\n resource_statements = resource.get(matched_key, ())\n return remove_statements(\n statement_ids, policy_statements, resource_statements)\n", "path": "c7n/actions/policy.py"}]} | 2,247 | 116 |
gh_patches_debug_3018 | rasdani/github-patches | git_diff | Mailu__Mailu-958 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Using an external SMTP relay server for outgoing emails
Hi,
I need to use mailchannels.com to relay all outgoing email from my Mailu install. This doc describes what I need to change in Postfix:
https://mailchannels.zendesk.com/hc/en-us/articles/200262640-Setting-up-for-Postfix
Is there any way to do this in Mailu?
Thanks,
</issue>
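For context, the sketch below shows one way relay settings could be wired into the Postfix container's startup script. It is illustrative only: the environment variable names (`RELAYHOST`, `RELAYUSER`, `RELAYPASSWORD`) and file paths are assumptions rather than an existing Mailu interface, although the Postfix parameters themselves (`relayhost`, `smtp_sasl_*`) and the `postconf`/`postmap` commands are standard.

```python
# Illustrative sketch only; the RELAY* variable names are assumptions.
import os

relayhost = os.environ.get("RELAYHOST")
if relayhost:
    os.system('postconf -e "relayhost = {}"'.format(relayhost))

if "RELAYUSER" in os.environ:
    path = "/etc/postfix/sasl_passwd"
    with open(path, "w") as f:
        # Postfix sasl_passwd format: "<relayhost> <user>:<password>"
        f.write("{} {}:{}\n".format(relayhost or "",
                                    os.environ["RELAYUSER"],
                                    os.environ.get("RELAYPASSWORD", "")))
    os.system("postmap {}".format(path))
    os.system('postconf -e "smtp_sasl_auth_enable = yes"')
    os.system('postconf -e "smtp_sasl_password_maps = hash:{}"'.format(path))
    os.system('postconf -e "smtp_sasl_security_options = noanonymous"')
```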
<code>
[start of core/postfix/start.py]
1 #!/usr/bin/python3
2
3 import os
4 import glob
5 import shutil
6 import multiprocessing
7 import logging as log
8 import sys
9 from mailustart import resolve, convert
10
11 from podop import run_server
12
13 log.basicConfig(stream=sys.stderr, level=os.environ.get("LOG_LEVEL", "WARNING"))
14
15 def start_podop():
16 os.setuid(100)
17 url = "http://" + os.environ["ADMIN_ADDRESS"] + "/internal/postfix/"
18 # TODO: Remove verbosity setting from Podop?
19 run_server(0, "postfix", "/tmp/podop.socket", [
20 ("transport", "url", url + "transport/§"),
21 ("alias", "url", url + "alias/§"),
22 ("domain", "url", url + "domain/§"),
23 ("mailbox", "url", url + "mailbox/§"),
24 ("senderaccess", "url", url + "sender/access/§"),
25 ("senderlogin", "url", url + "sender/login/§")
26 ])
27
28 # Actual startup script
29 os.environ["FRONT_ADDRESS"] = resolve(os.environ.get("FRONT_ADDRESS", "front"))
30 os.environ["ADMIN_ADDRESS"] = resolve(os.environ.get("ADMIN_ADDRESS", "admin"))
31 os.environ["HOST_ANTISPAM"] = resolve(os.environ.get("HOST_ANTISPAM", "antispam:11332"))
32 os.environ["HOST_LMTP"] = resolve(os.environ.get("HOST_LMTP", "imap:2525"))
33
34 for postfix_file in glob.glob("/conf/*.cf"):
35 convert(postfix_file, os.path.join("/etc/postfix", os.path.basename(postfix_file)))
36
37 if os.path.exists("/overrides/postfix.cf"):
38 for line in open("/overrides/postfix.cf").read().strip().split("\n"):
39 os.system('postconf -e "{}"'.format(line))
40
41 if os.path.exists("/overrides/postfix.master"):
42 for line in open("/overrides/postfix.master").read().strip().split("\n"):
43 os.system('postconf -Me "{}"'.format(line))
44
45 for map_file in glob.glob("/overrides/*.map"):
46 destination = os.path.join("/etc/postfix", os.path.basename(map_file))
47 shutil.copyfile(map_file, destination)
48 os.system("postmap {}".format(destination))
49 os.remove(destination)
50
51 convert("/conf/rsyslog.conf", "/etc/rsyslog.conf")
52
53 # Run Podop and Postfix
54 multiprocessing.Process(target=start_podop).start()
55 if os.path.exists("/var/run/rsyslogd.pid"):
56 os.remove("/var/run/rsyslogd.pid")
57 os.system("/usr/lib/postfix/post-install meta_directory=/etc/postfix create-missing")
58 os.system("/usr/lib/postfix/master &")
59 os.execv("/usr/sbin/rsyslogd", ["rsyslogd", "-n"])
60
[end of core/postfix/start.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/core/postfix/start.py b/core/postfix/start.py
--- a/core/postfix/start.py
+++ b/core/postfix/start.py
@@ -48,6 +48,11 @@
os.system("postmap {}".format(destination))
os.remove(destination)
+if "RELAYUSER" in os.environ:
+ path = "/etc/postfix/sasl_passwd"
+ convert("/conf/sasl_passwd", path)
+ os.system("postmap {}".format(path))
+
convert("/conf/rsyslog.conf", "/etc/rsyslog.conf")
# Run Podop and Postfix
| {"golden_diff": "diff --git a/core/postfix/start.py b/core/postfix/start.py\n--- a/core/postfix/start.py\n+++ b/core/postfix/start.py\n@@ -48,6 +48,11 @@\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n \n+if \"RELAYUSER\" in os.environ:\n+ path = \"/etc/postfix/sasl_passwd\"\n+ convert(\"/conf/sasl_passwd\", path)\n+ os.system(\"postmap {}\".format(path))\n+\n convert(\"/conf/rsyslog.conf\", \"/etc/rsyslog.conf\")\n \n # Run Podop and Postfix\n", "issue": "Using external smtp relay server for outgoing emails\nHi,\r\n\r\nI need to use mailchannels.com to relay all outgoing emails from my Mailu install. In this doc is what I need to change in Postfix:\r\n\r\nhttps://mailchannels.zendesk.com/hc/en-us/articles/200262640-Setting-up-for-Postfix\r\n\r\nIs there any way to do this in Mailu ?\r\n\r\nThanks,\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport os\nimport glob\nimport shutil\nimport multiprocessing\nimport logging as log\nimport sys\nfrom mailustart import resolve, convert\n\nfrom podop import run_server\n\nlog.basicConfig(stream=sys.stderr, level=os.environ.get(\"LOG_LEVEL\", \"WARNING\"))\n\ndef start_podop():\n os.setuid(100)\n url = \"http://\" + os.environ[\"ADMIN_ADDRESS\"] + \"/internal/postfix/\"\n # TODO: Remove verbosity setting from Podop?\n run_server(0, \"postfix\", \"/tmp/podop.socket\", [\n\t\t(\"transport\", \"url\", url + \"transport/\u00a7\"),\n\t\t(\"alias\", \"url\", url + \"alias/\u00a7\"),\n\t\t(\"domain\", \"url\", url + \"domain/\u00a7\"),\n (\"mailbox\", \"url\", url + \"mailbox/\u00a7\"),\n (\"senderaccess\", \"url\", url + \"sender/access/\u00a7\"),\n (\"senderlogin\", \"url\", url + \"sender/login/\u00a7\")\n ])\n\n# Actual startup script\nos.environ[\"FRONT_ADDRESS\"] = resolve(os.environ.get(\"FRONT_ADDRESS\", \"front\"))\nos.environ[\"ADMIN_ADDRESS\"] = resolve(os.environ.get(\"ADMIN_ADDRESS\", \"admin\"))\nos.environ[\"HOST_ANTISPAM\"] = resolve(os.environ.get(\"HOST_ANTISPAM\", \"antispam:11332\"))\nos.environ[\"HOST_LMTP\"] = resolve(os.environ.get(\"HOST_LMTP\", \"imap:2525\"))\n\nfor postfix_file in glob.glob(\"/conf/*.cf\"):\n convert(postfix_file, os.path.join(\"/etc/postfix\", os.path.basename(postfix_file)))\n\nif os.path.exists(\"/overrides/postfix.cf\"):\n for line in open(\"/overrides/postfix.cf\").read().strip().split(\"\\n\"):\n os.system('postconf -e \"{}\"'.format(line))\n\nif os.path.exists(\"/overrides/postfix.master\"):\n for line in open(\"/overrides/postfix.master\").read().strip().split(\"\\n\"):\n os.system('postconf -Me \"{}\"'.format(line))\n\nfor map_file in glob.glob(\"/overrides/*.map\"):\n destination = os.path.join(\"/etc/postfix\", os.path.basename(map_file))\n shutil.copyfile(map_file, destination)\n os.system(\"postmap {}\".format(destination))\n os.remove(destination)\n\nconvert(\"/conf/rsyslog.conf\", \"/etc/rsyslog.conf\")\n\n# Run Podop and Postfix\nmultiprocessing.Process(target=start_podop).start()\nif os.path.exists(\"/var/run/rsyslogd.pid\"):\n os.remove(\"/var/run/rsyslogd.pid\")\nos.system(\"/usr/lib/postfix/post-install meta_directory=/etc/postfix create-missing\")\nos.system(\"/usr/lib/postfix/master &\")\nos.execv(\"/usr/sbin/rsyslogd\", [\"rsyslogd\", \"-n\"])\n", "path": "core/postfix/start.py"}]} | 1,337 | 131 |
gh_patches_debug_9532 | rasdani/github-patches | git_diff | google__flax-2717 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add two aliases to activations import
Add the JAX standardize alias for normalize as well, see #2088.
This is currently blocked since JAX only just added these, so we should add them once JAX rolls out a new version.
</issue>
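A minimal sketch of the requested re-export is shown below. Whether `jax.nn.standardize` is importable depends on the installed JAX version, which is exactly why this is blocked on a new JAX release; the guarded import is only an illustration, not the intended final code.

```python
# Sketch only: guarded alias so the import also works on older JAX releases.
try:
    from jax.nn import standardize as standardize  # newer JAX name
except ImportError:
    # Fall back on JAX versions that predate the new alias.
    from jax.nn import normalize as standardize
```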
<code>
[start of flax/linen/__init__.py]
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """The Flax Module system."""
16
17
18 # pylint: disable=g-multiple-import,useless-import-alias
19 # re-export commonly used modules and functions
20 from .activation import (
21 PReLU as PReLU,
22 celu as celu,
23 elu as elu,
24 gelu as gelu,
25 glu as glu,
26 hard_sigmoid as hard_sigmoid,
27 hard_silu as hard_silu,
28 hard_swish as hard_swish,
29 hard_tanh as hard_tanh,
30 leaky_relu as leaky_relu,
31 log_sigmoid as log_sigmoid,
32 log_softmax as log_softmax,
33 logsumexp as logsumexp,
34 normalize as normalize,
35 one_hot as one_hot,
36 relu as relu,
37 relu6 as relu6,
38 selu as selu,
39 sigmoid as sigmoid,
40 silu as silu,
41 soft_sign as soft_sign,
42 softmax as softmax,
43 softplus as softplus,
44 swish as swish,
45 tanh as tanh
46 )
47 from .attention import (
48 MultiHeadDotProductAttention as MultiHeadDotProductAttention,
49 SelfAttention as SelfAttention,
50 combine_masks as combine_masks,
51 dot_product_attention as dot_product_attention,
52 dot_product_attention_weights as dot_product_attention_weights,
53 make_attention_mask as make_attention_mask,
54 make_causal_mask as make_causal_mask
55 )
56 from .combinators import Sequential as Sequential
57 from ..core import (
58 DenyList as DenyList,
59 FrozenDict as FrozenDict,
60 broadcast as broadcast,
61 meta as meta,
62 )
63 from ..core.meta import (
64 Partitioned as Partitioned,
65 with_partitioning as with_partitioning,
66 get_partition_spec as get_partition_spec,
67 unbox as unbox,
68 PARTITION_NAME as PARTITION_NAME,
69 )
70 from .spmd import (
71 logical_axis_rules as logical_axis_rules,
72 set_logical_axis_rules as set_logical_axis_rules,
73 get_logical_axis_rules as get_logical_axis_rules,
74 logical_to_mesh_axes,
75 logical_to_mesh,
76 with_logical_constraint,
77 LogicallyPartitioned as LogicallyPartitioned,
78 with_logical_partitioning as with_logical_partitioning,
79 )
80 from .initializers import (
81 ones as ones,
82 zeros as zeros
83 )
84 from .linear import (
85 Conv as Conv,
86 ConvLocal as ConvLocal,
87 ConvTranspose as ConvTranspose,
88 Dense as Dense,
89 DenseGeneral as DenseGeneral,
90 Embed as Embed
91 )
92 from .module import (
93 Module as Module,
94 Variable as Variable,
95 apply as apply,
96 compact as compact,
97 disable_named_call as disable_named_call,
98 enable_named_call as enable_named_call,
99 init as init,
100 init_with_output as init_with_output,
101 merge_param as merge_param,
102 nowrap as nowrap,
103 override_named_call as override_named_call
104 )
105 from .normalization import (
106 BatchNorm as BatchNorm,
107 GroupNorm as GroupNorm,
108 LayerNorm as LayerNorm
109 )
110 from .pooling import (
111 avg_pool as avg_pool,
112 max_pool as max_pool,
113 pool as pool
114 )
115 from .recurrent import (
116 ConvLSTM as ConvLSTM,
117 GRUCell as GRUCell,
118 LSTMCell as LSTMCell,
119 OptimizedLSTMCell as OptimizedLSTMCell
120 )
121 from .stochastic import Dropout as Dropout
122 from .transforms import (
123 checkpoint as checkpoint,
124 custom_vjp as custom_vjp,
125 jit as jit,
126 jvp as jvp,
127 map_variables as map_variables,
128 named_call as named_call,
129 remat as remat,
130 remat_scan as remat_scan,
131 scan as scan,
132 vjp as vjp,
133 vmap as vmap,
134 while_loop as while_loop,
135 cond as cond,
136 switch as switch,
137 )
138 from .summary import tabulate
139 # pylint: enable=g-multiple-import
140
[end of flax/linen/__init__.py]
[start of flax/linen/activation.py]
1 # Copyright 2022 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Activation functions.
16 """
17
18 # pylint: disable=unused-import
19 # re-export activation functions from jax.nn
20 from typing import Any, Optional
21
22 from flax.linen.module import compact
23 from flax.linen.module import Module
24
25 from jax.nn import celu
26 from jax.nn import elu
27 from jax.nn import gelu
28 from jax.nn import glu
29 from jax.nn import hard_sigmoid
30 from jax.nn import hard_silu
31 from jax.nn import hard_swish
32 from jax.nn import hard_tanh
33 from jax.nn import leaky_relu
34 from jax.nn import log_sigmoid
35 from jax.nn import log_softmax
36 from jax.nn import logsumexp
37 from jax.nn import normalize
38 from jax.nn import one_hot
39 from jax.nn import relu
40 from jax.nn import relu6
41 from jax.nn import selu
42 from jax.nn import sigmoid
43 from jax.nn import silu
44 from jax.nn import soft_sign
45 from jax.nn import softmax
46 from jax.nn import softplus
47 from jax.nn import swish
48 import jax.numpy as jnp
49 from jax.numpy import tanh
50
51 # pylint: enable=unused-import
52
53
54 Array = Any
55 Dtype = Any
56
57
58 class PReLU(Module):
59 """Parametric Rectified Linear Unit (PReLU) activation function.
60
61 Attributes:
62 param_dtype: the dtype passed to parameter initializers (default: float32).
63 negative_slope_init: the value to initialize the negative slope
64 (default 0.01).
65 """
66 param_dtype: Dtype = jnp.float32
67 negative_slope_init: float = 0.01
68
69 @compact
70 def __call__(self, inputs: Array) -> Array:
71 """Applies an activation to the inputs.
72
73 Args:
74 inputs: the nd-array to apply the activation function to.
75
76 Returns:
77 The transformed input.
78 """
79 negative_slope = self.param(
80 'negative_slope',
81 lambda k: jnp.asarray(self.negative_slope_init, self.param_dtype))
82 return jnp.where(inputs >= 0, inputs,
83 jnp.asarray(negative_slope, inputs.dtype) * inputs)
84
[end of flax/linen/activation.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py
--- a/flax/linen/__init__.py
+++ b/flax/linen/__init__.py
@@ -41,6 +41,7 @@
soft_sign as soft_sign,
softmax as softmax,
softplus as softplus,
+ standardize as standardize,
swish as swish,
tanh as tanh
)
diff --git a/flax/linen/activation.py b/flax/linen/activation.py
--- a/flax/linen/activation.py
+++ b/flax/linen/activation.py
@@ -44,6 +44,7 @@
from jax.nn import soft_sign
from jax.nn import softmax
from jax.nn import softplus
+from jax.nn import standardize
from jax.nn import swish
import jax.numpy as jnp
from jax.numpy import tanh
| {"golden_diff": "diff --git a/flax/linen/__init__.py b/flax/linen/__init__.py\n--- a/flax/linen/__init__.py\n+++ b/flax/linen/__init__.py\n@@ -41,6 +41,7 @@\n soft_sign as soft_sign,\n softmax as softmax,\n softplus as softplus,\n+ standardize as standardize,\n swish as swish,\n tanh as tanh\n )\ndiff --git a/flax/linen/activation.py b/flax/linen/activation.py\n--- a/flax/linen/activation.py\n+++ b/flax/linen/activation.py\n@@ -44,6 +44,7 @@\n from jax.nn import soft_sign\n from jax.nn import softmax\n from jax.nn import softplus\n+from jax.nn import standardize\n from jax.nn import swish\n import jax.numpy as jnp\n from jax.numpy import tanh\n", "issue": "Add two aliases to activations import\nAdd the JAX standardize alias for normalize as well, see #2088.\r\n\r\nThis is currently blocked since JAX just added these so we should add them once JAX rolls out a new verison.\n", "before_files": [{"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Flax Module system.\"\"\"\n\n\n# pylint: disable=g-multiple-import,useless-import-alias\n# re-export commonly used modules and functions\nfrom .activation import (\n PReLU as PReLU,\n celu as celu,\n elu as elu,\n gelu as gelu,\n glu as glu,\n hard_sigmoid as hard_sigmoid,\n hard_silu as hard_silu,\n hard_swish as hard_swish,\n hard_tanh as hard_tanh,\n leaky_relu as leaky_relu,\n log_sigmoid as log_sigmoid,\n log_softmax as log_softmax,\n logsumexp as logsumexp,\n normalize as normalize,\n one_hot as one_hot,\n relu as relu,\n relu6 as relu6,\n selu as selu,\n sigmoid as sigmoid,\n silu as silu,\n soft_sign as soft_sign,\n softmax as softmax,\n softplus as softplus,\n swish as swish,\n tanh as tanh\n)\nfrom .attention import (\n MultiHeadDotProductAttention as MultiHeadDotProductAttention,\n SelfAttention as SelfAttention,\n combine_masks as combine_masks,\n dot_product_attention as dot_product_attention,\n dot_product_attention_weights as dot_product_attention_weights,\n make_attention_mask as make_attention_mask,\n make_causal_mask as make_causal_mask\n)\nfrom .combinators import Sequential as Sequential\nfrom ..core import (\n DenyList as DenyList,\n FrozenDict as FrozenDict,\n broadcast as broadcast,\n meta as meta,\n)\nfrom ..core.meta import (\n Partitioned as Partitioned,\n with_partitioning as with_partitioning,\n get_partition_spec as get_partition_spec,\n unbox as unbox,\n PARTITION_NAME as PARTITION_NAME,\n)\nfrom .spmd import (\n logical_axis_rules as logical_axis_rules,\n set_logical_axis_rules as set_logical_axis_rules,\n get_logical_axis_rules as get_logical_axis_rules,\n logical_to_mesh_axes,\n logical_to_mesh,\n with_logical_constraint,\n LogicallyPartitioned as LogicallyPartitioned,\n with_logical_partitioning as with_logical_partitioning,\n)\nfrom .initializers import (\n ones as ones,\n zeros as zeros\n)\nfrom .linear import (\n Conv as Conv,\n ConvLocal as ConvLocal,\n ConvTranspose as ConvTranspose,\n Dense as Dense,\n DenseGeneral as DenseGeneral,\n Embed as 
Embed\n)\nfrom .module import (\n Module as Module,\n Variable as Variable,\n apply as apply,\n compact as compact,\n disable_named_call as disable_named_call,\n enable_named_call as enable_named_call,\n init as init,\n init_with_output as init_with_output,\n merge_param as merge_param,\n nowrap as nowrap,\n override_named_call as override_named_call\n)\nfrom .normalization import (\n BatchNorm as BatchNorm,\n GroupNorm as GroupNorm,\n LayerNorm as LayerNorm\n)\nfrom .pooling import (\n avg_pool as avg_pool,\n max_pool as max_pool,\n pool as pool\n)\nfrom .recurrent import (\n ConvLSTM as ConvLSTM,\n GRUCell as GRUCell,\n LSTMCell as LSTMCell,\n OptimizedLSTMCell as OptimizedLSTMCell\n)\nfrom .stochastic import Dropout as Dropout\nfrom .transforms import (\n checkpoint as checkpoint,\n custom_vjp as custom_vjp,\n jit as jit,\n jvp as jvp,\n map_variables as map_variables,\n named_call as named_call,\n remat as remat,\n remat_scan as remat_scan,\n scan as scan,\n vjp as vjp,\n vmap as vmap,\n while_loop as while_loop,\n cond as cond,\n switch as switch,\n)\nfrom .summary import tabulate\n# pylint: enable=g-multiple-import\n", "path": "flax/linen/__init__.py"}, {"content": "# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Activation functions.\n\"\"\"\n\n# pylint: disable=unused-import\n# re-export activation functions from jax.nn\nfrom typing import Any, Optional\n\nfrom flax.linen.module import compact\nfrom flax.linen.module import Module\n\nfrom jax.nn import celu\nfrom jax.nn import elu\nfrom jax.nn import gelu\nfrom jax.nn import glu\nfrom jax.nn import hard_sigmoid\nfrom jax.nn import hard_silu\nfrom jax.nn import hard_swish\nfrom jax.nn import hard_tanh\nfrom jax.nn import leaky_relu\nfrom jax.nn import log_sigmoid\nfrom jax.nn import log_softmax\nfrom jax.nn import logsumexp\nfrom jax.nn import normalize\nfrom jax.nn import one_hot\nfrom jax.nn import relu\nfrom jax.nn import relu6\nfrom jax.nn import selu\nfrom jax.nn import sigmoid\nfrom jax.nn import silu\nfrom jax.nn import soft_sign\nfrom jax.nn import softmax\nfrom jax.nn import softplus\nfrom jax.nn import swish\nimport jax.numpy as jnp\nfrom jax.numpy import tanh\n\n# pylint: enable=unused-import\n\n\nArray = Any\nDtype = Any\n\n\nclass PReLU(Module):\n \"\"\"Parametric Rectified Linear Unit (PReLU) activation function.\n\n Attributes:\n param_dtype: the dtype passed to parameter initializers (default: float32).\n negative_slope_init: the value to initialize the negative slope\n (default 0.01).\n \"\"\"\n param_dtype: Dtype = jnp.float32\n negative_slope_init: float = 0.01\n\n @compact\n def __call__(self, inputs: Array) -> Array:\n \"\"\"Applies an activation to the inputs.\n\n Args:\n inputs: the nd-array to apply the activation function to.\n\n Returns:\n The transformed input.\n \"\"\"\n negative_slope = self.param(\n 'negative_slope',\n lambda k: jnp.asarray(self.negative_slope_init, self.param_dtype))\n return jnp.where(inputs >= 0, inputs,\n jnp.asarray(negative_slope, 
inputs.dtype) * inputs)\n", "path": "flax/linen/activation.py"}]} | 2,651 | 214 |
gh_patches_debug_8744 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1749 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Error when importing MindsDB in a Jupyter notebook
**Your Environment**
* Python version: 3.6
* Operating system: Ubuntu
* Mindsdb version: 2.12.2
**Describe the bug**
Importing MindsDB from a Jupyter notebook fails, apparently because the HTTP API's command-line argument parser triggers on import.
**To Reproduce**
1. Run a new Jupyter notebook
2. Execute a cell with `import mindsdb`
The following error should occur:
```
usage: ipykernel_launcher.py [-h] [--api API] [--config CONFIG] [--verbose] [-v]
ipykernel_launcher.py: error: unrecognized arguments: -f /home/user/.local/share/jupyter/runtime/kernel.json
An exception has occurred, use %tb to see the full traceback.
SystemExit: 2
/env/lib/python3.6/site-packages/IPython/core/interactiveshell.py:3351: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.
warn("To exit: use 'exit', 'quit', or Ctrl-D.", stacklevel=1)
```
**Expected behavior**
MindsDB should import successfully.
**Additional note**
`import mindsdb_native` works fine.
</issue>
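The traceback above comes from `argparse` seeing Jupyter's own kernel arguments at import time. A stand-alone sketch of the failure and of one common mitigation (`parse_known_args`, which is not necessarily the fix MindsDB should adopt) is:

```python
# Stand-alone sketch; the argument list mimics what a Jupyter kernel passes.
import argparse

parser = argparse.ArgumentParser(description='CL argument for mindsdb server')
parser.add_argument('--api', type=str, default=None)

jupyter_like_argv = ['-f', '/home/user/.local/share/jupyter/runtime/kernel.json']

# parse_args() raises SystemExit (code 2) on the unrecognized -f argument ...
try:
    parser.parse_args(jupyter_like_argv)
except SystemExit as exc:
    print('parse_args exited with code', exc.code)

# ... while parse_known_args() returns the unknown arguments instead.
args, unknown = parser.parse_known_args(jupyter_like_argv)
print(args, unknown)
```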
<code>
[start of mindsdb/utilities/functions.py]
1 import argparse
2 import datetime
3 import requests
4 from functools import wraps
5
6 from mindsdb.utilities.fs import create_process_mark, delete_process_mark
7
8
9 def args_parse():
10 parser = argparse.ArgumentParser(description='CL argument for mindsdb server')
11 parser.add_argument('--api', type=str, default=None)
12 parser.add_argument('--config', type=str, default=None)
13 parser.add_argument('--verbose', action='store_true')
14 parser.add_argument('--no_studio', action='store_true')
15 parser.add_argument('-v', '--version', action='store_true')
16 parser.add_argument('--ray', action='store_true', default=None)
17 return parser.parse_args()
18
19
20 def cast_row_types(row, field_types):
21 '''
22 '''
23 keys = [x for x in row.keys() if x in field_types]
24 for key in keys:
25 t = field_types[key]
26 if t == 'Timestamp' and isinstance(row[key], (int, float)):
27 timestamp = datetime.datetime.utcfromtimestamp(row[key])
28 row[key] = timestamp.strftime('%Y-%m-%d %H:%M:%S')
29 elif t == 'Date' and isinstance(row[key], (int, float)):
30 timestamp = datetime.datetime.utcfromtimestamp(row[key])
31 row[key] = timestamp.strftime('%Y-%m-%d')
32 elif t == 'Int' and isinstance(row[key], (int, float, str)):
33 try:
34 print(f'cast {row[key]} to {int(row[key])}')
35 row[key] = int(row[key])
36 except Exception:
37 pass
38
39
40 def is_notebook():
41 try:
42 shell = get_ipython().__class__.__name__
43 if shell == 'ZMQInteractiveShell':
44 return True # Jupyter notebook or qtconsole
45 elif shell == 'TerminalInteractiveShell':
46 return False # Terminal running IPython
47 else:
48 return False # Other type (?)
49 except NameError:
50 return False # Probably standard Python interpreter
51
52
53 def mark_process(name):
54 def mark_process_wrapper(func):
55 @wraps(func)
56 def wrapper(*args, **kwargs):
57 mark = create_process_mark(name)
58 try:
59 return func(*args, **kwargs)
60 finally:
61 delete_process_mark(name, mark)
62 return wrapper
63 return mark_process_wrapper
64
65
66 def get_versions_where_predictors_become_obsolete():
67 """ Get list of MindsDB versions in which predictors should be retrained
68 Returns:
69 list of str or False
70 """
71 versions_for_updating_predictors = []
72 try:
73 try:
74 res = requests.get(
75 'https://mindsdb-cloud-public-service-files.s3.us-east-2.amazonaws.com/version_for_updating_predictors.txt',
76 timeout=0.5
77 )
78 except (ConnectionError, requests.exceptions.ConnectionError) as e:
79 print(f'Is no connection. {e}')
80 raise
81 except Exception as e:
82 print(f'Is something wrong with getting version_for_updating_predictors.txt: {e}')
83 raise
84
85 if res.status_code != 200:
86 print(f'Cant get version_for_updating_predictors.txt: returned status code = {res.status_code}')
87 raise
88
89 try:
90 versions_for_updating_predictors = res.text.replace(' \t\r', '').split('\n')
91 except Exception as e:
92 print(f'Cant decode compatible-config.json: {e}')
93 raise
94 except Exception:
95 return False, versions_for_updating_predictors
96
97 versions_for_updating_predictors = [x for x in versions_for_updating_predictors if len(x) > 0]
98 return True, versions_for_updating_predictors
99
[end of mindsdb/utilities/functions.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mindsdb/utilities/functions.py b/mindsdb/utilities/functions.py
--- a/mindsdb/utilities/functions.py
+++ b/mindsdb/utilities/functions.py
@@ -39,13 +39,10 @@
def is_notebook():
try:
- shell = get_ipython().__class__.__name__
- if shell == 'ZMQInteractiveShell':
- return True # Jupyter notebook or qtconsole
- elif shell == 'TerminalInteractiveShell':
- return False # Terminal running IPython
+ if 'IPKernelApp' in get_ipython().config:
+ return True
else:
- return False # Other type (?)
+ return False
except NameError:
return False # Probably standard Python interpreter
| {"golden_diff": "diff --git a/mindsdb/utilities/functions.py b/mindsdb/utilities/functions.py\n--- a/mindsdb/utilities/functions.py\n+++ b/mindsdb/utilities/functions.py\n@@ -39,13 +39,10 @@\n \n def is_notebook():\n try:\n- shell = get_ipython().__class__.__name__\n- if shell == 'ZMQInteractiveShell':\n- return True # Jupyter notebook or qtconsole\n- elif shell == 'TerminalInteractiveShell':\n- return False # Terminal running IPython\n+ if 'IPKernelApp' in get_ipython().config:\n+ return True\n else:\n- return False # Other type (?)\n+ return False\n except NameError:\n return False # Probably standard Python interpreter\n", "issue": "Error when importing MindsDB in a Jupyter notebook\n**Your Environment**\r\n\r\n* Python version: 3.6\r\n* Operating system: Ubuntu\r\n* Mindsdb version: 2.12.2\r\n\r\n**Describe the bug**\r\nImporting MindsDB from a Jupyter Notebook fails, apparently because the HTTP API triggers.\r\n\r\n**To Reproduce**\r\n1. Run a new Jupyter notebook\r\n2. Execute a cell with `import mindsdb`\r\n\r\nThe following error should occur:\r\n```usage: ipykernel_launcher.py [-h] [--api API] [--config CONFIG] [--verbose] [-v]\r\nipykernel_launcher.py: error: unrecognized arguments: -f /home/user/.local/share/jupyter/runtime/kernel.json\r\n\r\nAn exception has occurred, use %tb to see the full traceback.\r\nSystemExit: 2\r\n\r\n/env/lib/python3.6/site-packages/IPython/core/interactiveshell.py:3351: UserWarning: To exit: use 'exit', 'quit', or Ctrl-D.\r\n warn(\"To exit: use 'exit', 'quit', or Ctrl-D.\", stacklevel=1)\r\n```\r\n\r\n**Expected behavior**\r\nMindsDB should import successfully.\r\n\r\n**Additional note**\r\n`import mindsdb_native` works fine.\n", "before_files": [{"content": "import argparse\nimport datetime\nimport requests\nfrom functools import wraps\n\nfrom mindsdb.utilities.fs import create_process_mark, delete_process_mark\n\n\ndef args_parse():\n parser = argparse.ArgumentParser(description='CL argument for mindsdb server')\n parser.add_argument('--api', type=str, default=None)\n parser.add_argument('--config', type=str, default=None)\n parser.add_argument('--verbose', action='store_true')\n parser.add_argument('--no_studio', action='store_true')\n parser.add_argument('-v', '--version', action='store_true')\n parser.add_argument('--ray', action='store_true', default=None)\n return parser.parse_args()\n\n\ndef cast_row_types(row, field_types):\n '''\n '''\n keys = [x for x in row.keys() if x in field_types]\n for key in keys:\n t = field_types[key]\n if t == 'Timestamp' and isinstance(row[key], (int, float)):\n timestamp = datetime.datetime.utcfromtimestamp(row[key])\n row[key] = timestamp.strftime('%Y-%m-%d %H:%M:%S')\n elif t == 'Date' and isinstance(row[key], (int, float)):\n timestamp = datetime.datetime.utcfromtimestamp(row[key])\n row[key] = timestamp.strftime('%Y-%m-%d')\n elif t == 'Int' and isinstance(row[key], (int, float, str)):\n try:\n print(f'cast {row[key]} to {int(row[key])}')\n row[key] = int(row[key])\n except Exception:\n pass\n\n\ndef is_notebook():\n try:\n shell = get_ipython().__class__.__name__\n if shell == 'ZMQInteractiveShell':\n return True # Jupyter notebook or qtconsole\n elif shell == 'TerminalInteractiveShell':\n return False # Terminal running IPython\n else:\n return False # Other type (?)\n except NameError:\n return False # Probably standard Python interpreter\n\n\ndef mark_process(name):\n def mark_process_wrapper(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n mark = create_process_mark(name)\n try:\n return 
func(*args, **kwargs)\n finally:\n delete_process_mark(name, mark)\n return wrapper\n return mark_process_wrapper\n\n\ndef get_versions_where_predictors_become_obsolete():\n \"\"\" Get list of MindsDB versions in which predictors should be retrained\n Returns:\n list of str or False\n \"\"\"\n versions_for_updating_predictors = []\n try:\n try:\n res = requests.get(\n 'https://mindsdb-cloud-public-service-files.s3.us-east-2.amazonaws.com/version_for_updating_predictors.txt',\n timeout=0.5\n )\n except (ConnectionError, requests.exceptions.ConnectionError) as e:\n print(f'Is no connection. {e}')\n raise\n except Exception as e:\n print(f'Is something wrong with getting version_for_updating_predictors.txt: {e}')\n raise\n\n if res.status_code != 200:\n print(f'Cant get version_for_updating_predictors.txt: returned status code = {res.status_code}')\n raise\n\n try:\n versions_for_updating_predictors = res.text.replace(' \\t\\r', '').split('\\n')\n except Exception as e:\n print(f'Cant decode compatible-config.json: {e}')\n raise\n except Exception:\n return False, versions_for_updating_predictors\n\n versions_for_updating_predictors = [x for x in versions_for_updating_predictors if len(x) > 0]\n return True, versions_for_updating_predictors\n", "path": "mindsdb/utilities/functions.py"}]} | 1,761 | 173 |
gh_patches_debug_41995 | rasdani/github-patches | git_diff | networkx__networkx-2721 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in write_shp when some edges are missing attribute(s)
When writing shapefiles with v1.11, `write_shp` reuses the last valid values for attributes that an edge doesn't have. The reason is that the values of missing `key`s in the dictionary `attributes` are not updated in the edge loop. A work-around is to make sure all attributes are set before using `write_shp`, or to remove edges with missing attributes, but this requires a separate edge loop. So it would be nice to have a fix integrated in `write_shp`. Thanks!
</issue>
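The stale-value behaviour is independent of OGR; it comes from the `attributes` dict being shared across loop iterations. A stand-alone sketch of the pattern (with invented edge data) reproduces it:

```python
# Stand-alone sketch of the shared-dict pattern described above.
edges = [
    {"weight": 3, "name": "a"},
    {"name": "b"},             # this edge has no "weight" attribute
]

attributes = {}                # shared across iterations, as in write_shp
written = []
for data in edges:
    for key, value in data.items():
        attributes[key] = value
    written.append(dict(attributes))

print(written)
# [{'weight': 3, 'name': 'a'}, {'weight': 3, 'name': 'b'}]   <- stale weight
# Resetting `attributes` (or copying it) per edge avoids the carry-over.
```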
<code>
[start of networkx/readwrite/nx_shp.py]
1 """
2 *********
3 Shapefile
4 *********
5
6 Generates a networkx.DiGraph from point and line shapefiles.
7
8 "The Esri Shapefile or simply a shapefile is a popular geospatial vector
9 data format for geographic information systems software. It is developed
10 and regulated by Esri as a (mostly) open specification for data
11 interoperability among Esri and other software products."
12 See https://en.wikipedia.org/wiki/Shapefile for additional information.
13 """
14 # Copyright (C) 2004-2017 by
15 # Ben Reilly <[email protected]>
16 # Aric Hagberg <[email protected]>
17 # Dan Schult <[email protected]>
18 # Pieter Swart <[email protected]>
19 # All rights reserved.
20 # BSD license.
21 import networkx as nx
22 __author__ = """Ben Reilly ([email protected])"""
23 __all__ = ['read_shp', 'write_shp']
24
25
26 def read_shp(path, simplify=True, geom_attrs=True):
27 """Generates a networkx.DiGraph from shapefiles. Point geometries are
28 translated into nodes, lines into edges. Coordinate tuples are used as
29 keys. Attributes are preserved, line geometries are simplified into start
30 and end coordinates. Accepts a single shapefile or directory of many
31 shapefiles.
32
33 "The Esri Shapefile or simply a shapefile is a popular geospatial vector
34 data format for geographic information systems software [1]_."
35
36 Parameters
37 ----------
38 path : file or string
39 File, directory, or filename to read.
40
41 simplify: bool
42 If True, simplify line geometries to start and end coordinates.
43 If False, and line feature geometry has multiple segments, the
44 non-geometric attributes for that feature will be repeated for each
45 edge comprising that feature.
46
47 geom_attrs: bool
48 If True, include the Wkb, Wkt and Json geometry attributes with
49 each edge.
50
51 NOTE: if these attributes are available, write_shp will use them
52 to write the geometry. If nodes store the underlying coordinates for
53 the edge geometry as well (as they do when they are read via
54 this method) and they change, your geomety will be out of sync.
55
56
57 Returns
58 -------
59 G : NetworkX graph
60
61 Examples
62 --------
63 >>> G=nx.read_shp('test.shp') # doctest: +SKIP
64
65 References
66 ----------
67 .. [1] https://en.wikipedia.org/wiki/Shapefile
68 """
69 try:
70 from osgeo import ogr
71 except ImportError:
72 raise ImportError("read_shp requires OGR: http://www.gdal.org/")
73
74 if not isinstance(path, str):
75 return
76
77 net = nx.DiGraph()
78 shp = ogr.Open(path)
79 for lyr in shp:
80 fields = [x.GetName() for x in lyr.schema]
81 for f in lyr:
82 flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]
83 g = f.geometry()
84 attributes = dict(zip(fields, flddata))
85 attributes["ShpName"] = lyr.GetName()
86 # Note: Using layer level geometry type
87 if g.GetGeometryType() == ogr.wkbPoint:
88 net.add_node((g.GetPoint_2D(0)), **attributes)
89 elif g.GetGeometryType() in (ogr.wkbLineString,
90 ogr.wkbMultiLineString):
91 for edge in edges_from_line(g, attributes, simplify,
92 geom_attrs):
93 e1, e2, attr = edge
94 net.add_edge(e1, e2)
95 net[e1][e2].update(attr)
96 else:
97 raise ImportError("GeometryType {} not supported".
98 format(g.GetGeometryType()))
99
100 return net
101
102
103 def edges_from_line(geom, attrs, simplify=True, geom_attrs=True):
104 """
105 Generate edges for each line in geom
106 Written as a helper for read_shp
107
108 Parameters
109 ----------
110
111 geom: ogr line geometry
112 To be converted into an edge or edges
113
114 attrs: dict
115 Attributes to be associated with all geoms
116
117 simplify: bool
118 If True, simplify the line as in read_shp
119
120 geom_attrs: bool
121 If True, add geom attributes to edge as in read_shp
122
123
124 Returns
125 -------
126 edges: generator of edges
127 each edge is a tuple of form
128 (node1_coord, node2_coord, attribute_dict)
129 suitable for expanding into a networkx Graph add_edge call
130 """
131 try:
132 from osgeo import ogr
133 except ImportError:
134 raise ImportError("edges_from_line requires OGR: http://www.gdal.org/")
135
136 if geom.GetGeometryType() == ogr.wkbLineString:
137 if simplify:
138 edge_attrs = attrs.copy()
139 last = geom.GetPointCount() - 1
140 if geom_attrs:
141 edge_attrs["Wkb"] = geom.ExportToWkb()
142 edge_attrs["Wkt"] = geom.ExportToWkt()
143 edge_attrs["Json"] = geom.ExportToJson()
144 yield (geom.GetPoint_2D(0), geom.GetPoint_2D(last), edge_attrs)
145 else:
146 for i in range(0, geom.GetPointCount() - 1):
147 pt1 = geom.GetPoint_2D(i)
148 pt2 = geom.GetPoint_2D(i + 1)
149 edge_attrs = attrs.copy()
150 if geom_attrs:
151 segment = ogr.Geometry(ogr.wkbLineString)
152 segment.AddPoint_2D(pt1[0], pt1[1])
153 segment.AddPoint_2D(pt2[0], pt2[1])
154 edge_attrs["Wkb"] = segment.ExportToWkb()
155 edge_attrs["Wkt"] = segment.ExportToWkt()
156 edge_attrs["Json"] = segment.ExportToJson()
157 del segment
158 yield (pt1, pt2, edge_attrs)
159
160 elif geom.GetGeometryType() == ogr.wkbMultiLineString:
161 for i in range(geom.GetGeometryCount()):
162 geom_i = geom.GetGeometryRef(i)
163 for edge in edges_from_line(geom_i, attrs, simplify, geom_attrs):
164 yield edge
165
166
167 def write_shp(G, outdir):
168 """Writes a networkx.DiGraph to two shapefiles, edges and nodes.
169 Nodes and edges are expected to have a Well Known Binary (Wkb) or
170 Well Known Text (Wkt) key in order to generate geometries. Also
171 acceptable are nodes with a numeric tuple key (x,y).
172
173 "The Esri Shapefile or simply a shapefile is a popular geospatial vector
174 data format for geographic information systems software [1]_."
175
176 Parameters
177 ----------
178 outdir : directory path
179 Output directory for the two shapefiles.
180
181 Returns
182 -------
183 None
184
185 Examples
186 --------
187 nx.write_shp(digraph, '/shapefiles') # doctest +SKIP
188
189 References
190 ----------
191 .. [1] https://en.wikipedia.org/wiki/Shapefile
192 """
193 try:
194 from osgeo import ogr
195 except ImportError:
196 raise ImportError("write_shp requires OGR: http://www.gdal.org/")
197 # easier to debug in python if ogr throws exceptions
198 ogr.UseExceptions()
199
200 def netgeometry(key, data):
201 if 'Wkb' in data:
202 geom = ogr.CreateGeometryFromWkb(data['Wkb'])
203 elif 'Wkt' in data:
204 geom = ogr.CreateGeometryFromWkt(data['Wkt'])
205 elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples
206 geom = ogr.Geometry(ogr.wkbLineString)
207 _from, _to = key[0], key[1]
208 try:
209 geom.SetPoint(0, *_from)
210 geom.SetPoint(1, *_to)
211 except TypeError:
212 # assume user used tuple of int and choked ogr
213 _ffrom = [float(x) for x in _from]
214 _fto = [float(x) for x in _to]
215 geom.SetPoint(0, *_ffrom)
216 geom.SetPoint(1, *_fto)
217 else:
218 geom = ogr.Geometry(ogr.wkbPoint)
219 try:
220 geom.SetPoint(0, *key)
221 except TypeError:
222 # assume user used tuple of int and choked ogr
223 fkey = [float(x) for x in key]
224 geom.SetPoint(0, *fkey)
225
226 return geom
227
228 # Create_feature with new optional attributes arg (should be dict type)
229 def create_feature(geometry, lyr, attributes=None):
230 feature = ogr.Feature(lyr.GetLayerDefn())
231 feature.SetGeometry(g)
232 if attributes is not None:
233 # Loop through attributes, assigning data to each field
234 for field, data in attributes.items():
235 feature.SetField(field, data)
236 lyr.CreateFeature(feature)
237 feature.Destroy()
238
239 drv = ogr.GetDriverByName("ESRI Shapefile")
240 shpdir = drv.CreateDataSource(outdir)
241 # delete pre-existing output first otherwise ogr chokes
242 try:
243 shpdir.DeleteLayer("nodes")
244 except:
245 pass
246 nodes = shpdir.CreateLayer("nodes", None, ogr.wkbPoint)
247 for n in G:
248 data = G.nodes[n]
249 g = netgeometry(n, data)
250 create_feature(g, nodes)
251 try:
252 shpdir.DeleteLayer("edges")
253 except:
254 pass
255 edges = shpdir.CreateLayer("edges", None, ogr.wkbLineString)
256
257 # New edge attribute write support merged into edge loop
258 fields = {} # storage for field names and their data types
259 attributes = {} # storage for attribute data (indexed by field names)
260
261 # Conversion dict between python and ogr types
262 OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}
263
264 # Edge loop
265 for e in G.edges(data=True):
266 data = G.get_edge_data(*e)
267 g = netgeometry(e, data)
268 # Loop through attribute data in edges
269 for key, data in e[2].items():
270 # Reject spatial data not required for attribute table
271 if (key != 'Json' and key != 'Wkt' and key != 'Wkb'
272 and key != 'ShpName'):
273 # For all edges check/add field and data type to fields dict
274 if key not in fields:
275 # Field not in previous edges so add to dict
276 if type(data) in OGRTypes:
277 fields[key] = OGRTypes[type(data)]
278 else:
279 # Data type not supported, default to string (char 80)
280 fields[key] = ogr.OFTString
281 # Create the new field
282 newfield = ogr.FieldDefn(key, fields[key])
283 edges.CreateField(newfield)
284 # Store the data from new field to dict for CreateLayer()
285 attributes[key] = data
286 else:
287 # Field already exists, add data to dict for CreateLayer()
288 attributes[key] = data
289 # Create the feature with, passing new attribute data
290 create_feature(g, edges, attributes)
291
292 nodes, edges = None, None
293
294
295 # fixture for nose tests
296 def setup_module(module):
297 from nose import SkipTest
298 try:
299 import ogr
300 except:
301 raise SkipTest("OGR not available")
302
[end of networkx/readwrite/nx_shp.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/networkx/readwrite/nx_shp.py b/networkx/readwrite/nx_shp.py
--- a/networkx/readwrite/nx_shp.py
+++ b/networkx/readwrite/nx_shp.py
@@ -23,7 +23,7 @@
__all__ = ['read_shp', 'write_shp']
-def read_shp(path, simplify=True, geom_attrs=True):
+def read_shp(path, simplify=True, geom_attrs=True, strict=True):
"""Generates a networkx.DiGraph from shapefiles. Point geometries are
translated into nodes, lines into edges. Coordinate tuples are used as
keys. Attributes are preserved, line geometries are simplified into start
@@ -53,11 +53,27 @@
the edge geometry as well (as they do when they are read via
this method) and they change, your geomety will be out of sync.
+ strict: bool
+ If True, raise NetworkXError when feature geometry is missing or
+ GeometryType is not supported.
+ If False, silently ignore missing or unsupported geometry in features.
Returns
-------
G : NetworkX graph
+ Raises
+ ------
+ ImportError
+ If ogr module is not available.
+
+ RuntimeError
+ If file cannot be open or read.
+
+ NetworkXError
+ If strict=True and feature is missing geometry or GeometryType is
+ not supported.
+
Examples
--------
>>> G=nx.read_shp('test.shp') # doctest: +SKIP
@@ -76,11 +92,18 @@
net = nx.DiGraph()
shp = ogr.Open(path)
+ if shp is None:
+ raise RuntimeError("Unable to open {}".format(path))
for lyr in shp:
fields = [x.GetName() for x in lyr.schema]
for f in lyr:
- flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]
g = f.geometry()
+ if g is None:
+ if strict:
+ raise nx.NetworkXError("Bad data: feature missing geometry")
+ else:
+ continue
+ flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]
attributes = dict(zip(fields, flddata))
attributes["ShpName"] = lyr.GetName()
# Note: Using layer level geometry type
@@ -94,8 +117,9 @@
net.add_edge(e1, e2)
net[e1][e2].update(attr)
else:
- raise ImportError("GeometryType {} not supported".
- format(g.GetGeometryType()))
+ if strict:
+ raise nx.NetworkXError("GeometryType {} not supported".
+ format(g.GetGeometryType()))
return net
@@ -256,13 +280,13 @@
# New edge attribute write support merged into edge loop
fields = {} # storage for field names and their data types
- attributes = {} # storage for attribute data (indexed by field names)
# Conversion dict between python and ogr types
OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}
# Edge loop
for e in G.edges(data=True):
+ attributes = {} # storage for attribute data (indexed by field names)
data = G.get_edge_data(*e)
g = netgeometry(e, data)
# Loop through attribute data in edges
| {"golden_diff": "diff --git a/networkx/readwrite/nx_shp.py b/networkx/readwrite/nx_shp.py\n--- a/networkx/readwrite/nx_shp.py\n+++ b/networkx/readwrite/nx_shp.py\n@@ -23,7 +23,7 @@\n __all__ = ['read_shp', 'write_shp']\n \n \n-def read_shp(path, simplify=True, geom_attrs=True):\n+def read_shp(path, simplify=True, geom_attrs=True, strict=True):\n \"\"\"Generates a networkx.DiGraph from shapefiles. Point geometries are\n translated into nodes, lines into edges. Coordinate tuples are used as\n keys. Attributes are preserved, line geometries are simplified into start\n@@ -53,11 +53,27 @@\n the edge geometry as well (as they do when they are read via\n this method) and they change, your geomety will be out of sync.\n \n+ strict: bool\n+ If True, raise NetworkXError when feature geometry is missing or\n+ GeometryType is not supported.\n+ If False, silently ignore missing or unsupported geometry in features.\n \n Returns\n -------\n G : NetworkX graph\n \n+ Raises\n+ ------\n+ ImportError\n+ If ogr module is not available.\n+\n+ RuntimeError\n+ If file cannot be open or read.\n+\n+ NetworkXError\n+ If strict=True and feature is missing geometry or GeometryType is\n+ not supported.\n+\n Examples\n --------\n >>> G=nx.read_shp('test.shp') # doctest: +SKIP\n@@ -76,11 +92,18 @@\n \n net = nx.DiGraph()\n shp = ogr.Open(path)\n+ if shp is None:\n+ raise RuntimeError(\"Unable to open {}\".format(path))\n for lyr in shp:\n fields = [x.GetName() for x in lyr.schema]\n for f in lyr:\n- flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]\n g = f.geometry()\n+ if g is None:\n+ if strict:\n+ raise nx.NetworkXError(\"Bad data: feature missing geometry\")\n+ else:\n+ continue\n+ flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]\n attributes = dict(zip(fields, flddata))\n attributes[\"ShpName\"] = lyr.GetName()\n # Note: Using layer level geometry type\n@@ -94,8 +117,9 @@\n net.add_edge(e1, e2)\n net[e1][e2].update(attr)\n else:\n- raise ImportError(\"GeometryType {} not supported\".\n- format(g.GetGeometryType()))\n+ if strict:\n+ raise nx.NetworkXError(\"GeometryType {} not supported\".\n+ format(g.GetGeometryType()))\n \n return net\n \n@@ -256,13 +280,13 @@\n \n # New edge attribute write support merged into edge loop\n fields = {} # storage for field names and their data types\n- attributes = {} # storage for attribute data (indexed by field names)\n \n # Conversion dict between python and ogr types\n OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}\n \n # Edge loop\n for e in G.edges(data=True):\n+ attributes = {} # storage for attribute data (indexed by field names)\n data = G.get_edge_data(*e)\n g = netgeometry(e, data)\n # Loop through attribute data in edges\n", "issue": "Bug in write_shp when some edges miss attribute(s)\n\r\nWhen writing shapefiles with v1.11, `write_shp` uses the last valid values for attributes an edge doesn't have. The reason is that the values of missing `key`s in the dictionary `attributes` are not updated in the edge loop. A work-around is to make sure all attributes are set before using `write_shp` or to remove edges with missing attributes but this requires a separate edge loop. So, it would be nice to have a fix integrated in `write_shp`. Thanks!\n", "before_files": [{"content": "\"\"\"\n*********\nShapefile\n*********\n\nGenerates a networkx.DiGraph from point and line shapefiles.\n\n\"The Esri Shapefile or simply a shapefile is a popular geospatial vector\ndata format for geographic information systems software. 
It is developed\nand regulated by Esri as a (mostly) open specification for data\ninteroperability among Esri and other software products.\"\nSee https://en.wikipedia.org/wiki/Shapefile for additional information.\n\"\"\"\n# Copyright (C) 2004-2017 by\n# Ben Reilly <[email protected]>\n# Aric Hagberg <[email protected]>\n# Dan Schult <[email protected]>\n# Pieter Swart <[email protected]>\n# All rights reserved.\n# BSD license.\nimport networkx as nx\n__author__ = \"\"\"Ben Reilly ([email protected])\"\"\"\n__all__ = ['read_shp', 'write_shp']\n\n\ndef read_shp(path, simplify=True, geom_attrs=True):\n \"\"\"Generates a networkx.DiGraph from shapefiles. Point geometries are\n translated into nodes, lines into edges. Coordinate tuples are used as\n keys. Attributes are preserved, line geometries are simplified into start\n and end coordinates. Accepts a single shapefile or directory of many\n shapefiles.\n\n \"The Esri Shapefile or simply a shapefile is a popular geospatial vector\n data format for geographic information systems software [1]_.\"\n\n Parameters\n ----------\n path : file or string\n File, directory, or filename to read.\n\n simplify: bool\n If True, simplify line geometries to start and end coordinates.\n If False, and line feature geometry has multiple segments, the\n non-geometric attributes for that feature will be repeated for each\n edge comprising that feature.\n\n geom_attrs: bool\n If True, include the Wkb, Wkt and Json geometry attributes with\n each edge.\n\n NOTE: if these attributes are available, write_shp will use them\n to write the geometry. If nodes store the underlying coordinates for\n the edge geometry as well (as they do when they are read via\n this method) and they change, your geomety will be out of sync.\n\n\n Returns\n -------\n G : NetworkX graph\n\n Examples\n --------\n >>> G=nx.read_shp('test.shp') # doctest: +SKIP\n\n References\n ----------\n .. 
[1] https://en.wikipedia.org/wiki/Shapefile\n \"\"\"\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"read_shp requires OGR: http://www.gdal.org/\")\n\n if not isinstance(path, str):\n return\n\n net = nx.DiGraph()\n shp = ogr.Open(path)\n for lyr in shp:\n fields = [x.GetName() for x in lyr.schema]\n for f in lyr:\n flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]\n g = f.geometry()\n attributes = dict(zip(fields, flddata))\n attributes[\"ShpName\"] = lyr.GetName()\n # Note: Using layer level geometry type\n if g.GetGeometryType() == ogr.wkbPoint:\n net.add_node((g.GetPoint_2D(0)), **attributes)\n elif g.GetGeometryType() in (ogr.wkbLineString,\n ogr.wkbMultiLineString):\n for edge in edges_from_line(g, attributes, simplify,\n geom_attrs):\n e1, e2, attr = edge\n net.add_edge(e1, e2)\n net[e1][e2].update(attr)\n else:\n raise ImportError(\"GeometryType {} not supported\".\n format(g.GetGeometryType()))\n\n return net\n\n\ndef edges_from_line(geom, attrs, simplify=True, geom_attrs=True):\n \"\"\"\n Generate edges for each line in geom\n Written as a helper for read_shp\n\n Parameters\n ----------\n\n geom: ogr line geometry\n To be converted into an edge or edges\n\n attrs: dict\n Attributes to be associated with all geoms\n\n simplify: bool\n If True, simplify the line as in read_shp\n\n geom_attrs: bool\n If True, add geom attributes to edge as in read_shp\n\n\n Returns\n -------\n edges: generator of edges\n each edge is a tuple of form\n (node1_coord, node2_coord, attribute_dict)\n suitable for expanding into a networkx Graph add_edge call\n \"\"\"\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"edges_from_line requires OGR: http://www.gdal.org/\")\n\n if geom.GetGeometryType() == ogr.wkbLineString:\n if simplify:\n edge_attrs = attrs.copy()\n last = geom.GetPointCount() - 1\n if geom_attrs:\n edge_attrs[\"Wkb\"] = geom.ExportToWkb()\n edge_attrs[\"Wkt\"] = geom.ExportToWkt()\n edge_attrs[\"Json\"] = geom.ExportToJson()\n yield (geom.GetPoint_2D(0), geom.GetPoint_2D(last), edge_attrs)\n else:\n for i in range(0, geom.GetPointCount() - 1):\n pt1 = geom.GetPoint_2D(i)\n pt2 = geom.GetPoint_2D(i + 1)\n edge_attrs = attrs.copy()\n if geom_attrs:\n segment = ogr.Geometry(ogr.wkbLineString)\n segment.AddPoint_2D(pt1[0], pt1[1])\n segment.AddPoint_2D(pt2[0], pt2[1])\n edge_attrs[\"Wkb\"] = segment.ExportToWkb()\n edge_attrs[\"Wkt\"] = segment.ExportToWkt()\n edge_attrs[\"Json\"] = segment.ExportToJson()\n del segment\n yield (pt1, pt2, edge_attrs)\n\n elif geom.GetGeometryType() == ogr.wkbMultiLineString:\n for i in range(geom.GetGeometryCount()):\n geom_i = geom.GetGeometryRef(i)\n for edge in edges_from_line(geom_i, attrs, simplify, geom_attrs):\n yield edge\n\n\ndef write_shp(G, outdir):\n \"\"\"Writes a networkx.DiGraph to two shapefiles, edges and nodes.\n Nodes and edges are expected to have a Well Known Binary (Wkb) or\n Well Known Text (Wkt) key in order to generate geometries. Also\n acceptable are nodes with a numeric tuple key (x,y).\n\n \"The Esri Shapefile or simply a shapefile is a popular geospatial vector\n data format for geographic information systems software [1]_.\"\n\n Parameters\n ----------\n outdir : directory path\n Output directory for the two shapefiles.\n\n Returns\n -------\n None\n\n Examples\n --------\n nx.write_shp(digraph, '/shapefiles') # doctest +SKIP\n\n References\n ----------\n .. 
[1] https://en.wikipedia.org/wiki/Shapefile\n \"\"\"\n try:\n from osgeo import ogr\n except ImportError:\n raise ImportError(\"write_shp requires OGR: http://www.gdal.org/\")\n # easier to debug in python if ogr throws exceptions\n ogr.UseExceptions()\n\n def netgeometry(key, data):\n if 'Wkb' in data:\n geom = ogr.CreateGeometryFromWkb(data['Wkb'])\n elif 'Wkt' in data:\n geom = ogr.CreateGeometryFromWkt(data['Wkt'])\n elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples\n geom = ogr.Geometry(ogr.wkbLineString)\n _from, _to = key[0], key[1]\n try:\n geom.SetPoint(0, *_from)\n geom.SetPoint(1, *_to)\n except TypeError:\n # assume user used tuple of int and choked ogr\n _ffrom = [float(x) for x in _from]\n _fto = [float(x) for x in _to]\n geom.SetPoint(0, *_ffrom)\n geom.SetPoint(1, *_fto)\n else:\n geom = ogr.Geometry(ogr.wkbPoint)\n try:\n geom.SetPoint(0, *key)\n except TypeError:\n # assume user used tuple of int and choked ogr\n fkey = [float(x) for x in key]\n geom.SetPoint(0, *fkey)\n\n return geom\n\n # Create_feature with new optional attributes arg (should be dict type)\n def create_feature(geometry, lyr, attributes=None):\n feature = ogr.Feature(lyr.GetLayerDefn())\n feature.SetGeometry(g)\n if attributes is not None:\n # Loop through attributes, assigning data to each field\n for field, data in attributes.items():\n feature.SetField(field, data)\n lyr.CreateFeature(feature)\n feature.Destroy()\n\n drv = ogr.GetDriverByName(\"ESRI Shapefile\")\n shpdir = drv.CreateDataSource(outdir)\n # delete pre-existing output first otherwise ogr chokes\n try:\n shpdir.DeleteLayer(\"nodes\")\n except:\n pass\n nodes = shpdir.CreateLayer(\"nodes\", None, ogr.wkbPoint)\n for n in G:\n data = G.nodes[n]\n g = netgeometry(n, data)\n create_feature(g, nodes)\n try:\n shpdir.DeleteLayer(\"edges\")\n except:\n pass\n edges = shpdir.CreateLayer(\"edges\", None, ogr.wkbLineString)\n\n # New edge attribute write support merged into edge loop\n fields = {} # storage for field names and their data types\n attributes = {} # storage for attribute data (indexed by field names)\n\n # Conversion dict between python and ogr types\n OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}\n\n # Edge loop\n for e in G.edges(data=True):\n data = G.get_edge_data(*e)\n g = netgeometry(e, data)\n # Loop through attribute data in edges\n for key, data in e[2].items():\n # Reject spatial data not required for attribute table\n if (key != 'Json' and key != 'Wkt' and key != 'Wkb'\n and key != 'ShpName'):\n # For all edges check/add field and data type to fields dict\n if key not in fields:\n # Field not in previous edges so add to dict\n if type(data) in OGRTypes:\n fields[key] = OGRTypes[type(data)]\n else:\n # Data type not supported, default to string (char 80)\n fields[key] = ogr.OFTString\n # Create the new field\n newfield = ogr.FieldDefn(key, fields[key])\n edges.CreateField(newfield)\n # Store the data from new field to dict for CreateLayer()\n attributes[key] = data\n else:\n # Field already exists, add data to dict for CreateLayer()\n attributes[key] = data\n # Create the feature with, passing new attribute data\n create_feature(g, edges, attributes)\n\n nodes, edges = None, None\n\n\n# fixture for nose tests\ndef setup_module(module):\n from nose import SkipTest\n try:\n import ogr\n except:\n raise SkipTest(\"OGR not available\")\n", "path": "networkx/readwrite/nx_shp.py"}]} | 3,973 | 783 |
gh_patches_debug_23036 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-1364 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rule E3028 throws exception if RDS::DBCluster is present without EngineMode specified
*cfn-lint version: `0.28.0`*
*Description of issue.*
Error thrown:
```
E0002 Unknown exception while processing rule E3028: 'NoneType' object has no attribute 'items_safe'
```
Debug log:
```
2020-02-17 15:24:57,775 - cfnlint - DEBUG - Completed linting of file: .\test.yml
E0002 Unknown exception while processing rule E3028: Traceback (most recent call last):
File "c:\python36\lib\site-packages\cfnlint\rules\__init__.py", line 203, in run_check
return check(*args)
File "c:\python36\lib\site-packages\cfnlint\rules\__init__.py", line 89, in wrapper
results = match_function(self, filename, cfn, *args, **kwargs)
File "c:\python36\lib\site-packages\cfnlint\rules\__init__.py", line 122, in matchall_resource_properties
return self.match_resource_properties(resource_properties, property_type, path, cfn) # pylint: disable=E1102
File "c:\python36\lib\site-packages\cfnlint\rules\resources\rds\AuroraScalingConfiguration.py", line 44, in match_resource_properties
matches.extend(self.check(properties, path, cfn))
File "c:\python36\lib\site-packages\cfnlint\rules\resources\rds\AuroraScalingConfiguration.py", line 30, in check
engine = properties.get_safe('EngineMode', type_t=six.string_types)
File "c:\python36\lib\site-packages\cfnlint\decode\node.py", line 125, in get_safe
for sub_v, sub_path in value.items_safe(path + [key]):
AttributeError: 'NoneType' object has no attribute 'items_safe'
.\test.yml:1:1
```
Breaking template:
```
---
AWSTemplateFormatVersion: 2010-09-09
Resources:
DatabaseCluster:
Type: AWS::RDS::DBCluster
Properties:
Engine: aurora-postgresql
#EngineMode: provisioned
EngineVersion: "10.5"
```
Working template:
```
---
AWSTemplateFormatVersion: 2010-09-09
Resources:
DatabaseCluster:
Type: AWS::RDS::DBCluster
Properties:
Engine: aurora-postgresql
EngineMode: provisioned
EngineVersion: "10.5"
```
Problem is around here: https://github.com/aws-cloudformation/cfn-python-lint/blob/084f832f50408c8bea3f087f48d9b38e4e5a5c9d/src/cfnlint/rules/resources/rds/AuroraScalingConfiguration.py#L26-L31
I suspect that `get_value_from_scenario` is returning `None` and that the rule isn't checking for it: https://github.com/aws-cloudformation/cfn-python-lint/blob/0757dc4c7ba108f47c19eb0917b0e6e37ee62d83/src/cfnlint/__init__.py#L825
</issue>
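The failure described above is easy to reproduce outside of cfn-lint. The sketch below is purely illustrative — `FakeNode` is a hypothetical, stripped-down stand-in for the project's `dict_node`, not the real class — but it shows why an early `None` guard in `get_safe` prevents the `AttributeError` when a key such as `EngineMode` is absent and no default is supplied:

```python
class FakeNode(dict):
    """Stripped-down stand-in for cfn-lint's ``dict_node`` (illustration only)."""

    def items_safe(self, path=None):
        yield self, path or []

    def get_safe(self, key, default=None, path=None, type_t=()):
        path = path or []
        value = self.get(key, default)
        if value is None and default is None:
            # The guard the report asks for: an absent key with no default should
            # yield no results instead of falling through to ``None.items_safe(...)``.
            return []
        if not isinstance(value, dict):
            if isinstance(value, type_t) or not type_t:
                return [(value, path + [key])]
        results = []
        for sub_v, sub_path in value.items_safe(path + [key]):
            if isinstance(sub_v, type_t) or not type_t:
                results.append((sub_v, sub_path))
        return results


props = FakeNode({'Engine': 'aurora-postgresql'})
print(props.get_safe('EngineMode', type_t=str))  # [] -- no crash when the key is absent
```

A complete fix also has to keep honoring the `type_t` filter and validate any explicitly supplied `default`, which is what the project's change below does.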
<code>
[start of src/cfnlint/decode/node.py]
1 """
2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import sys
6 import logging
7 from copy import deepcopy
8 import six
9
10 LOGGER = logging.getLogger(__name__)
11
12
13 class TemplateAttributeError(AttributeError):
14 """ Custom error to capture Attribute Errors in the Template """
15
16
17 def create_str_node_class(cls):
18 """
19 Create string node class
20 """
21 class node_class(cls):
22 """Node class created based on the input class"""
23
24 def __init__(self, x, start_mark, end_mark):
25 try:
26 cls.__init__(self, x)
27 except TypeError:
28 cls.__init__(self)
29 self.start_mark = start_mark
30 self.end_mark = end_mark
31
32 # pylint: disable=bad-classmethod-argument, unused-argument
33 def __new__(self, x, start_mark, end_mark):
34 if sys.version_info >= (3, 0):
35 return cls.__new__(self, x)
36
37 if isinstance(x, six.string_types):
38 return cls.__new__(self, x.encode('ascii', 'ignore'))
39
40 return cls.__new__(self, x)
41
42 def __getattr__(self, name):
43 raise TemplateAttributeError('%s.%s is invalid' % (self.__class__.__name__, name))
44
45 def __deepcopy__(self, memo):
46 result = str_node(self, self.start_mark, self.end_mark)
47 memo[id(self)] = result
48 return result
49
50 def __copy__(self):
51 return self
52
53 node_class.__name__ = '%s_node' % cls.__name__
54 return node_class
55
56
57 def create_dict_node_class(cls):
58 """
59 Create dynamic node class
60 """
61 class node_class(cls):
62 """Node class created based on the input class"""
63
64 def __init__(self, x, start_mark, end_mark):
65 try:
66 cls.__init__(self, x)
67 except TypeError:
68 cls.__init__(self)
69 self.start_mark = start_mark
70 self.end_mark = end_mark
71 self.condition_functions = ['Fn::If']
72
73 def __deepcopy__(self, memo):
74 result = dict_node(self, self.start_mark, self.end_mark)
75 memo[id(self)] = result
76 for k, v in self.items():
77 result[deepcopy(k)] = deepcopy(v, memo)
78
79 return result
80
81 def __copy__(self):
82 return self
83
84 def is_function_returning_object(self, mappings=None):
85 """
86 Check if an object is using a function that could return an object
87 Return True when
88 Fn::Select:
89 - 0 # or any number
90 - !FindInMap [mapname, key, value] # or any mapname, key, value
91 Otherwise False
92 """
93 mappings = mappings or {}
94 if len(self) == 1:
95 for k, v in self.items():
96 if k in ['Fn::Select']:
97 if isinstance(v, list):
98 if len(v) == 2:
99 p_v = v[1]
100 if isinstance(p_v, dict):
101 if len(p_v) == 1:
102 for l_k in p_v.keys():
103 if l_k == 'Fn::FindInMap':
104 return True
105
106 return False
107
108 def get(self, key, default=None):
109 """ Override the default get """
110 if isinstance(default, dict):
111 default = dict_node(default, self.start_mark, self.end_mark)
112 return super(node_class, self).get(key, default)
113
114 def get_safe(self, key, default=None, path=None, type_t=()):
115 """
116 Get values in format
117 """
118 path = path or []
119 value = self.get(key, default)
120 if not isinstance(value, (dict)):
121 if isinstance(value, type_t) or not type_t:
122 return [(value, (path[:] + [key]))]
123
124 results = []
125 for sub_v, sub_path in value.items_safe(path + [key]):
126 if isinstance(sub_v, type_t) or not type_t:
127 results.append((sub_v, sub_path))
128
129 return results
130
131 def items_safe(self, path=None, type_t=()):
132 """Get items while handling IFs"""
133 path = path or []
134 if len(self) == 1:
135 for k, v in self.items():
136 if k == 'Fn::If':
137 if isinstance(v, list):
138 if len(v) == 3:
139 for i, if_v in enumerate(v[1:]):
140 if isinstance(if_v, dict):
141 # yield from if_v.items_safe(path[:] + [k, i - 1])
142 # Python 2.7 support
143 for items, p in if_v.items_safe(path[:] + [k, i + 1]):
144 if isinstance(items, type_t) or not type_t:
145 yield items, p
146 elif isinstance(if_v, list):
147 if isinstance(if_v, type_t) or not type_t:
148 yield if_v, path[:] + [k, i + 1]
149 else:
150 if isinstance(if_v, type_t) or not type_t:
151 yield if_v, path[:] + [k, i + 1]
152 elif not (k == 'Ref' and v == 'AWS::NoValue'):
153 if isinstance(self, type_t) or not type_t:
154 yield self, path[:]
155 else:
156 if isinstance(self, type_t) or not type_t:
157 yield self, path[:]
158
159 def __getattr__(self, name):
160 raise TemplateAttributeError('%s.%s is invalid' % (self.__class__.__name__, name))
161
162 node_class.__name__ = '%s_node' % cls.__name__
163 return node_class
164
165
166 def create_dict_list_class(cls):
167 """
168 Create dynamic list class
169 """
170 class node_class(cls):
171 """Node class created based on the input class"""
172
173 def __init__(self, x, start_mark, end_mark):
174 try:
175 cls.__init__(self, x)
176 except TypeError:
177 cls.__init__(self)
178 self.start_mark = start_mark
179 self.end_mark = end_mark
180 self.condition_functions = ['Fn::If']
181
182 def __deepcopy__(self, memo):
183 result = list_node([], self.start_mark, self.end_mark)
184 memo[id(self)] = result
185 for _, v in enumerate(self):
186 result.append(deepcopy(v, memo))
187
188 return result
189
190 def __copy__(self):
191 return self
192
193 def items_safe(self, path=None, type_t=()):
194 """Get items while handling IFs"""
195 path = path or []
196 for i, v in enumerate(self):
197 if isinstance(v, dict):
198 for items, p in v.items_safe(path[:] + [i]):
199 if isinstance(items, type_t) or not type_t:
200 yield items, p
201 else:
202 if isinstance(v, type_t) or not type_t:
203 yield v, path[:] + [i]
204
205 def __getattr__(self, name):
206 raise TemplateAttributeError('%s.%s is invalid' % (self.__class__.__name__, name))
207
208 node_class.__name__ = '%s_node' % cls.__name__
209 return node_class
210
211
212 str_node = create_str_node_class(str)
213 dict_node = create_dict_node_class(dict)
214 list_node = create_dict_list_class(list)
215
[end of src/cfnlint/decode/node.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/decode/node.py b/src/cfnlint/decode/node.py
--- a/src/cfnlint/decode/node.py
+++ b/src/cfnlint/decode/node.py
@@ -117,14 +117,23 @@
"""
path = path or []
value = self.get(key, default)
+ if value is None and default is None:
+ # if default is None and value is None return empty list
+ return []
+
+ # if the value is the default make sure that the default value is of type_t when specified
+ if bool(type_t) and value == default and not isinstance(default, type_t):
+ raise ValueError('"default" type should be of "type_t"')
+
+ # when not a dict see if if the value is of the right type
+ results = []
if not isinstance(value, (dict)):
if isinstance(value, type_t) or not type_t:
return [(value, (path[:] + [key]))]
-
- results = []
- for sub_v, sub_path in value.items_safe(path + [key]):
- if isinstance(sub_v, type_t) or not type_t:
- results.append((sub_v, sub_path))
+ else:
+ for sub_v, sub_path in value.items_safe(path + [key]):
+ if isinstance(sub_v, type_t) or not type_t:
+ results.append((sub_v, sub_path))
return results
| {"golden_diff": "diff --git a/src/cfnlint/decode/node.py b/src/cfnlint/decode/node.py\n--- a/src/cfnlint/decode/node.py\n+++ b/src/cfnlint/decode/node.py\n@@ -117,14 +117,23 @@\n \"\"\"\n path = path or []\n value = self.get(key, default)\n+ if value is None and default is None:\n+ # if default is None and value is None return empty list\n+ return []\n+\n+ # if the value is the default make sure that the default value is of type_t when specified\n+ if bool(type_t) and value == default and not isinstance(default, type_t):\n+ raise ValueError('\"default\" type should be of \"type_t\"')\n+\n+ # when not a dict see if if the value is of the right type\n+ results = []\n if not isinstance(value, (dict)):\n if isinstance(value, type_t) or not type_t:\n return [(value, (path[:] + [key]))]\n-\n- results = []\n- for sub_v, sub_path in value.items_safe(path + [key]):\n- if isinstance(sub_v, type_t) or not type_t:\n- results.append((sub_v, sub_path))\n+ else:\n+ for sub_v, sub_path in value.items_safe(path + [key]):\n+ if isinstance(sub_v, type_t) or not type_t:\n+ results.append((sub_v, sub_path))\n \n return results\n", "issue": "Rule E3028 throws exception if RDS::DBCluster is present without EngineMode specified\n*cfn-lint version: `0.28.0`*\r\n\r\n*Description of issue.*\r\nError thrown:\r\n```\r\nE0002 Unknown exception while processing rule E3028: 'NoneType' object has no attribute 'items_safe'\r\n```\r\nDebug log:\r\n```\r\n2020-02-17 15:24:57,775 - cfnlint - DEBUG - Completed linting of file: .\\test.yml\r\nE0002 Unknown exception while processing rule E3028: Traceback (most recent call last):\r\n File \"c:\\python36\\lib\\site-packages\\cfnlint\\rules\\__init__.py\", line 203, in run_check\r\n return check(*args)\r\n File \"c:\\python36\\lib\\site-packages\\cfnlint\\rules\\__init__.py\", line 89, in wrapper\r\n results = match_function(self, filename, cfn, *args, **kwargs)\r\n File \"c:\\python36\\lib\\site-packages\\cfnlint\\rules\\__init__.py\", line 122, in matchall_resource_properties\r\n return self.match_resource_properties(resource_properties, property_type, path, cfn) # pylint: disable=E1102\r\n File \"c:\\python36\\lib\\site-packages\\cfnlint\\rules\\resources\\rds\\AuroraScalingConfiguration.py\", line 44, in match_resource_properties\r\n matches.extend(self.check(properties, path, cfn))\r\n File \"c:\\python36\\lib\\site-packages\\cfnlint\\rules\\resources\\rds\\AuroraScalingConfiguration.py\", line 30, in check\r\n engine = properties.get_safe('EngineMode', type_t=six.string_types)\r\n File \"c:\\python36\\lib\\site-packages\\cfnlint\\decode\\node.py\", line 125, in get_safe\r\n for sub_v, sub_path in value.items_safe(path + [key]):\r\nAttributeError: 'NoneType' object has no attribute 'items_safe'\r\n.\\test.yml:1:1\r\n```\r\n\r\nBreaking template:\r\n```\r\n---\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nResources:\r\n DatabaseCluster:\r\n Type: AWS::RDS::DBCluster\r\n Properties:\r\n Engine: aurora-postgresql\r\n #EngineMode: provisioned\r\n EngineVersion: \"10.5\"\r\n```\r\nWorking template:\r\n```\r\n---\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nResources:\r\n DatabaseCluster:\r\n Type: AWS::RDS::DBCluster\r\n Properties:\r\n Engine: aurora-postgresql\r\n EngineMode: provisioned\r\n EngineVersion: \"10.5\"\r\n```\r\n\r\nProblem is around here: https://github.com/aws-cloudformation/cfn-python-lint/blob/084f832f50408c8bea3f087f48d9b38e4e5a5c9d/src/cfnlint/rules/resources/rds/AuroraScalingConfiguration.py#L26-L31\r\n\r\nI'm suspecting that `get_value_from_scenario` is 
returning `None` and then the rule isn't checking for that: https://github.com/aws-cloudformation/cfn-python-lint/blob/0757dc4c7ba108f47c19eb0917b0e6e37ee62d83/src/cfnlint/__init__.py#L825\n", "before_files": [{"content": "\"\"\"\nCopyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport sys\nimport logging\nfrom copy import deepcopy\nimport six\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass TemplateAttributeError(AttributeError):\n \"\"\" Custom error to capture Attribute Errors in the Template \"\"\"\n\n\ndef create_str_node_class(cls):\n \"\"\"\n Create string node class\n \"\"\"\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(self, x, start_mark, end_mark):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n self.start_mark = start_mark\n self.end_mark = end_mark\n\n # pylint: disable=bad-classmethod-argument, unused-argument\n def __new__(self, x, start_mark, end_mark):\n if sys.version_info >= (3, 0):\n return cls.__new__(self, x)\n\n if isinstance(x, six.string_types):\n return cls.__new__(self, x.encode('ascii', 'ignore'))\n\n return cls.__new__(self, x)\n\n def __getattr__(self, name):\n raise TemplateAttributeError('%s.%s is invalid' % (self.__class__.__name__, name))\n\n def __deepcopy__(self, memo):\n result = str_node(self, self.start_mark, self.end_mark)\n memo[id(self)] = result\n return result\n\n def __copy__(self):\n return self\n\n node_class.__name__ = '%s_node' % cls.__name__\n return node_class\n\n\ndef create_dict_node_class(cls):\n \"\"\"\n Create dynamic node class\n \"\"\"\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(self, x, start_mark, end_mark):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n self.start_mark = start_mark\n self.end_mark = end_mark\n self.condition_functions = ['Fn::If']\n\n def __deepcopy__(self, memo):\n result = dict_node(self, self.start_mark, self.end_mark)\n memo[id(self)] = result\n for k, v in self.items():\n result[deepcopy(k)] = deepcopy(v, memo)\n\n return result\n\n def __copy__(self):\n return self\n\n def is_function_returning_object(self, mappings=None):\n \"\"\"\n Check if an object is using a function that could return an object\n Return True when\n Fn::Select:\n - 0 # or any number\n - !FindInMap [mapname, key, value] # or any mapname, key, value\n Otherwise False\n \"\"\"\n mappings = mappings or {}\n if len(self) == 1:\n for k, v in self.items():\n if k in ['Fn::Select']:\n if isinstance(v, list):\n if len(v) == 2:\n p_v = v[1]\n if isinstance(p_v, dict):\n if len(p_v) == 1:\n for l_k in p_v.keys():\n if l_k == 'Fn::FindInMap':\n return True\n\n return False\n\n def get(self, key, default=None):\n \"\"\" Override the default get \"\"\"\n if isinstance(default, dict):\n default = dict_node(default, self.start_mark, self.end_mark)\n return super(node_class, self).get(key, default)\n\n def get_safe(self, key, default=None, path=None, type_t=()):\n \"\"\"\n Get values in format\n \"\"\"\n path = path or []\n value = self.get(key, default)\n if not isinstance(value, (dict)):\n if isinstance(value, type_t) or not type_t:\n return [(value, (path[:] + [key]))]\n\n results = []\n for sub_v, sub_path in value.items_safe(path + [key]):\n if isinstance(sub_v, type_t) or not type_t:\n results.append((sub_v, sub_path))\n\n return results\n\n def items_safe(self, path=None, type_t=()):\n \"\"\"Get items while handling 
IFs\"\"\"\n path = path or []\n if len(self) == 1:\n for k, v in self.items():\n if k == 'Fn::If':\n if isinstance(v, list):\n if len(v) == 3:\n for i, if_v in enumerate(v[1:]):\n if isinstance(if_v, dict):\n # yield from if_v.items_safe(path[:] + [k, i - 1])\n # Python 2.7 support\n for items, p in if_v.items_safe(path[:] + [k, i + 1]):\n if isinstance(items, type_t) or not type_t:\n yield items, p\n elif isinstance(if_v, list):\n if isinstance(if_v, type_t) or not type_t:\n yield if_v, path[:] + [k, i + 1]\n else:\n if isinstance(if_v, type_t) or not type_t:\n yield if_v, path[:] + [k, i + 1]\n elif not (k == 'Ref' and v == 'AWS::NoValue'):\n if isinstance(self, type_t) or not type_t:\n yield self, path[:]\n else:\n if isinstance(self, type_t) or not type_t:\n yield self, path[:]\n\n def __getattr__(self, name):\n raise TemplateAttributeError('%s.%s is invalid' % (self.__class__.__name__, name))\n\n node_class.__name__ = '%s_node' % cls.__name__\n return node_class\n\n\ndef create_dict_list_class(cls):\n \"\"\"\n Create dynamic list class\n \"\"\"\n class node_class(cls):\n \"\"\"Node class created based on the input class\"\"\"\n\n def __init__(self, x, start_mark, end_mark):\n try:\n cls.__init__(self, x)\n except TypeError:\n cls.__init__(self)\n self.start_mark = start_mark\n self.end_mark = end_mark\n self.condition_functions = ['Fn::If']\n\n def __deepcopy__(self, memo):\n result = list_node([], self.start_mark, self.end_mark)\n memo[id(self)] = result\n for _, v in enumerate(self):\n result.append(deepcopy(v, memo))\n\n return result\n\n def __copy__(self):\n return self\n\n def items_safe(self, path=None, type_t=()):\n \"\"\"Get items while handling IFs\"\"\"\n path = path or []\n for i, v in enumerate(self):\n if isinstance(v, dict):\n for items, p in v.items_safe(path[:] + [i]):\n if isinstance(items, type_t) or not type_t:\n yield items, p\n else:\n if isinstance(v, type_t) or not type_t:\n yield v, path[:] + [i]\n\n def __getattr__(self, name):\n raise TemplateAttributeError('%s.%s is invalid' % (self.__class__.__name__, name))\n\n node_class.__name__ = '%s_node' % cls.__name__\n return node_class\n\n\nstr_node = create_str_node_class(str)\ndict_node = create_dict_node_class(dict)\nlist_node = create_dict_list_class(list)\n", "path": "src/cfnlint/decode/node.py"}]} | 3,480 | 325 |
gh_patches_debug_32526 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-1766 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WPS204: Overused expression should not count unary minus operator
# Bug report
Using the unary minus operator often breaks rule WPS204.
## What's wrong
Using the unary minus operator, even with a named variable, violates rule WPS204.
Example:
```python
my_value = 5
position = (0, -my_value)
```
## How it should be
No violation should be raised.
## System information
### flake8 information
Contents of `flake8 --bug-report`:
### pip information
Contents of `pip freeze`:
</issue>
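The fix hinges on an AST-level distinction: `-my_value` is an `ast.UnaryOp` with a `USub` operator applied to a name, while `-1` is the same operator applied to a literal constant. The snippet below is an independent sketch using only the standard `ast` module on Python 3.8+ (the name `is_non_constant_negation` is made up for illustration and is not part of wemake-python-styleguide):

```python
import ast


def is_non_constant_negation(node: ast.AST) -> bool:
    """True for ``-some_name`` or ``-call()``, False for literals such as ``-1``."""
    return (
        isinstance(node, ast.UnaryOp) and
        isinstance(node.op, ast.USub) and
        not isinstance(node.operand, ast.Constant)
    )


negated_name = ast.parse('-my_value', mode='eval').body
negated_literal = ast.parse('-1', mode='eval').body

print(is_non_constant_negation(negated_name))     # True  -> reasonable to exempt from WPS204
print(is_non_constant_negation(negated_literal))  # False -> a magic number, still worth naming
```

The project's own predicate also has to account for its compatibility shims (e.g. the older `ast.Num` node), so the real check ends up slightly broader than this sketch.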
<code>
[start of wemake_python_styleguide/visitors/ast/complexity/overuses.py]
1 import ast
2 from collections import defaultdict
3 from typing import (
4 Callable,
5 ClassVar,
6 DefaultDict,
7 FrozenSet,
8 List,
9 Tuple,
10 Union,
11 )
12
13 from typing_extensions import final
14
15 from wemake_python_styleguide.compat.aliases import FunctionNodes
16 from wemake_python_styleguide.logic import source, walk
17 from wemake_python_styleguide.logic.complexity import overuses
18 from wemake_python_styleguide.types import AnyNodes, AnyText, AnyTextPrimitive
19 from wemake_python_styleguide.violations import complexity
20 from wemake_python_styleguide.visitors import base, decorators
21
22 #: We use these types to store the number of nodes usage in different contexts.
23 _Expressions = DefaultDict[str, List[ast.AST]]
24 _FunctionExpressions = DefaultDict[ast.AST, _Expressions]
25 _StringConstants = FrozenSet[Union[str, bytes]]
26
27
28 @final
29 @decorators.alias('visit_any_string', (
30 'visit_Str',
31 'visit_Bytes',
32 ))
33 class StringOveruseVisitor(base.BaseNodeVisitor):
34 """
35 Restricts repeated usage of the same string constant.
36
37 NB: Some short strings are ignored, as their use is very common and
38 forcing assignment would not make much sense (i.e. newlines or "").
39 """
40
41 _ignored_string_constants: ClassVar[_StringConstants] = frozenset((
42 ' ',
43 '',
44 '\n',
45 '\r\n',
46 '\t',
47 b' ',
48 b'',
49 b'\n',
50 b'\r\n',
51 b'\t',
52 ))
53
54 def __init__(self, *args, **kwargs) -> None:
55 """Inits the counter for constants."""
56 super().__init__(*args, **kwargs)
57 self._string_constants: DefaultDict[
58 AnyTextPrimitive, int,
59 ] = defaultdict(int)
60
61 def visit_any_string(self, node: AnyText) -> None:
62 """
63 Restricts to over-use string constants.
64
65 Raises:
66 OverusedStringViolation
67
68 """
69 self._check_string_constant(node)
70 self.generic_visit(node)
71
72 def _check_string_constant(self, node: AnyText) -> None:
73 if overuses.is_annotation(node):
74 return
75
76 # Some strings are so common, that it makes no sense to check if
77 # they are overused.
78 if node.s in self._ignored_string_constants:
79 return
80
81 self._string_constants[node.s] += 1
82
83 def _post_visit(self) -> None:
84 for string, usage_count in self._string_constants.items():
85 if usage_count > self.options.max_string_usages:
86 self.add_violation(
87 complexity.OverusedStringViolation(
88 text=source.render_string(string) or "''",
89 baseline=self.options.max_string_usages,
90 ),
91 )
92
93
94 @final
95 class ExpressionOveruseVisitor(base.BaseNodeVisitor):
96 """Finds overused expressions."""
97
98 _expressions: ClassVar[AnyNodes] = (
99 # We do not treat `ast.Attribute`s as expressions
100 # because they are too widely used. That's a compromise.
101 ast.Assert,
102 ast.BoolOp,
103 ast.BinOp,
104 ast.Call,
105 ast.Compare,
106 ast.Subscript,
107 ast.UnaryOp,
108 ast.Lambda,
109
110 ast.DictComp,
111 ast.Dict,
112 ast.List,
113 ast.ListComp,
114 ast.Tuple,
115 ast.GeneratorExp,
116 ast.Set,
117 ast.SetComp,
118 )
119
120 _ignore_predicates: Tuple[Callable[[ast.AST], bool], ...] = (
121 overuses.is_decorator,
122 overuses.is_self,
123 overuses.is_annotation,
124 overuses.is_class_context,
125 overuses.is_super_call,
126 overuses.is_primitive,
127 )
128
129 _msg: ClassVar[str] = '{0}; used {1}'
130
131 def __init__(self, *args, **kwargs) -> None:
132 """We need to track expression usage in functions and modules."""
133 super().__init__(*args, **kwargs)
134 self._module_expressions: _Expressions = defaultdict(list)
135 self._function_expressions: _FunctionExpressions = defaultdict(
136 lambda: defaultdict(list),
137 )
138
139 def visit(self, node: ast.AST) -> None:
140 """
141 Visits all nodes in a module to find overused values.
142
143 Raises:
144 OverusedExpressionViolation
145
146 """
147 if isinstance(node, self._expressions):
148 self._add_expression(node)
149 self.generic_visit(node)
150
151 def _add_expression(self, node: ast.AST) -> None:
152 if any(ignore(node) for ignore in self._ignore_predicates):
153 return
154
155 source_code = source.node_to_string(node)
156 self._module_expressions[source_code].append(node)
157
158 maybe_function = walk.get_closest_parent(node, FunctionNodes)
159 if maybe_function is not None:
160 self._function_expressions[maybe_function][source_code].append(
161 node,
162 )
163
164 def _post_visit(self) -> None:
165 for mod_source, module_nodes in self._module_expressions.items():
166 if len(module_nodes) > self.options.max_module_expressions:
167 self.add_violation(
168 complexity.OverusedExpressionViolation(
169 module_nodes[0],
170 text=self._msg.format(mod_source, len(module_nodes)),
171 baseline=self.options.max_module_expressions,
172 ),
173 )
174
175 for function_contexts in self._function_expressions.values():
176 for src, function_nodes in function_contexts.items():
177 if len(function_nodes) > self.options.max_function_expressions:
178 self.add_violation(
179 complexity.OverusedExpressionViolation(
180 function_nodes[0],
181 text=self._msg.format(src, len(function_nodes)),
182 baseline=self.options.max_function_expressions,
183 ),
184 )
185
[end of wemake_python_styleguide/visitors/ast/complexity/overuses.py]
[start of wemake_python_styleguide/logic/complexity/overuses.py]
1 import ast
2 from typing import Union
3
4 from wemake_python_styleguide.compat.aliases import FunctionNodes
5 from wemake_python_styleguide.constants import SPECIAL_ARGUMENT_NAMES_WHITELIST
6 from wemake_python_styleguide.logic import nodes, walk
7 from wemake_python_styleguide.logic.arguments import call_args
8
9 #: Nodes that can be annotated.
10 _AnnNodes = (ast.AnnAssign, ast.arg)
11
12
13 def is_class_context(node: ast.AST) -> bool:
14 """
15 Detects if a node is inside a class context.
16
17 We use this predicate because classes have quite complex
18 DSL to be created: like django-orm, attrs, and dataclasses.
19 And these DSLs are built using attributes and calls.
20 """
21 return isinstance(nodes.get_context(node), ast.ClassDef)
22
23
24 def is_super_call(node: ast.AST) -> bool:
25 """
26 Detects if super is called.
27
28 We use this predicate because we can call ``super()`` a lot in our code.
29 And it is fine.
30 """
31 if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):
32 return node.func.id == 'super'
33 return False
34
35
36 def is_decorator(node: ast.AST) -> bool:
37 """
38 Detects if node is used as a decorator.
39
40 We use this predicates because decorators can be used miltiple times.
41 Like ``@auth_required(login_url=LOGIN_URL)`` and similar.
42 """
43 parent = walk.get_closest_parent(node, FunctionNodes)
44 if isinstance(parent, FunctionNodes) and parent.decorator_list:
45 return any(
46 node == decorator or walk.is_contained_by(node, decorator)
47 for decorator in parent.decorator_list
48 )
49 return False
50
51
52 def is_self(node: ast.AST) -> bool:
53 """
54 Detects if node is ``self``, ``cls``, or ``mcs`` call.
55
56 We use this predicate because we allow a lot of ``self.method()`` or
57 ``self[start:end]`` calls. This is fine.
58
59 We do not check for attribute access, because ``ast.Attribute`` nodes
60 are globally ignored.
61 """
62 self_node: Union[ast.Attribute, ast.Subscript, None] = None
63 if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
64 self_node = node.func
65 elif isinstance(node, ast.Subscript):
66 self_node = node
67
68 return bool(
69 self_node and
70 isinstance(self_node.value, ast.Name) and
71 self_node.value.id in SPECIAL_ARGUMENT_NAMES_WHITELIST,
72 )
73
74
75 def is_annotation(node: ast.AST) -> bool:
76 """
77 Detects if node is an annotation. Or a part of it.
78
79 We use this predicate to allow all types of repetetive
80 function and instance annotations.
81 """
82 if not isinstance(node, (ast.Str, ast.List, ast.Tuple, ast.Subscript)):
83 return False
84
85 annotated = walk.get_closest_parent(node, (*_AnnNodes, *FunctionNodes))
86 if isinstance(annotated, FunctionNodes):
87 contains_node = bool(
88 annotated.returns and
89 walk.is_contained_by(node, annotated.returns),
90 )
91 return node == annotated.returns or contains_node
92 elif isinstance(annotated, _AnnNodes):
93 contains_node = bool(
94 annotated.annotation and
95 walk.is_contained_by(node, annotated.annotation),
96 )
97 return node == annotated.annotation or contains_node
98 return False
99
100
101 def is_primitive(node: ast.AST) -> bool:
102 """
103 Detects if node is a form of a primitive value.
104
105 We use this predicate to allow values
106 like ``[]`` or ``call()`` to be overused.
107 Because you cannot simplify them.
108
109 We do not check for strings, numbers, etc
110 because they are globally ignored.
111 """
112 if isinstance(node, (ast.Tuple, ast.List)):
113 return not node.elts # we do allow `[]` and `()`
114 elif isinstance(node, ast.Set):
115 return ( # we do allow `{*set_items}`
116 len(node.elts) == 1 and
117 isinstance(node.elts[0], ast.Starred)
118 )
119 elif isinstance(node, ast.Dict): # we do allow `{}` and `{**values}`
120 return not list(filter(None, node.keys))
121 elif isinstance(node, ast.Call):
122 return not call_args.get_all_args(node) # we do allow `call()`
123 return False
124
[end of wemake_python_styleguide/logic/complexity/overuses.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/logic/complexity/overuses.py b/wemake_python_styleguide/logic/complexity/overuses.py
--- a/wemake_python_styleguide/logic/complexity/overuses.py
+++ b/wemake_python_styleguide/logic/complexity/overuses.py
@@ -2,6 +2,7 @@
from typing import Union
from wemake_python_styleguide.compat.aliases import FunctionNodes
+from wemake_python_styleguide.compat.nodes import Constant
from wemake_python_styleguide.constants import SPECIAL_ARGUMENT_NAMES_WHITELIST
from wemake_python_styleguide.logic import nodes, walk
from wemake_python_styleguide.logic.arguments import call_args
@@ -121,3 +122,21 @@
elif isinstance(node, ast.Call):
return not call_args.get_all_args(node) # we do allow `call()`
return False
+
+
+def is_unary_minus(node: ast.AST) -> bool:
+ """
+ Detects if node is unary minus operator.
+
+ We use this predicate to allow values
+ like ``-some_value`` to be overused.
+
+ Although negative constants like ``-1``
+ should raise violation to force naming them.
+ """
+ if isinstance(node, ast.UnaryOp):
+ return (
+ isinstance(node.op, ast.USub) and
+ not isinstance(node.operand, (Constant, ast.Num))
+ )
+ return False
diff --git a/wemake_python_styleguide/visitors/ast/complexity/overuses.py b/wemake_python_styleguide/visitors/ast/complexity/overuses.py
--- a/wemake_python_styleguide/visitors/ast/complexity/overuses.py
+++ b/wemake_python_styleguide/visitors/ast/complexity/overuses.py
@@ -101,10 +101,10 @@
ast.Assert,
ast.BoolOp,
ast.BinOp,
+ ast.UnaryOp,
ast.Call,
ast.Compare,
ast.Subscript,
- ast.UnaryOp,
ast.Lambda,
ast.DictComp,
@@ -124,6 +124,7 @@
overuses.is_class_context,
overuses.is_super_call,
overuses.is_primitive,
+ overuses.is_unary_minus,
)
_msg: ClassVar[str] = '{0}; used {1}'
| {"golden_diff": "diff --git a/wemake_python_styleguide/logic/complexity/overuses.py b/wemake_python_styleguide/logic/complexity/overuses.py\n--- a/wemake_python_styleguide/logic/complexity/overuses.py\n+++ b/wemake_python_styleguide/logic/complexity/overuses.py\n@@ -2,6 +2,7 @@\n from typing import Union\n \n from wemake_python_styleguide.compat.aliases import FunctionNodes\n+from wemake_python_styleguide.compat.nodes import Constant\n from wemake_python_styleguide.constants import SPECIAL_ARGUMENT_NAMES_WHITELIST\n from wemake_python_styleguide.logic import nodes, walk\n from wemake_python_styleguide.logic.arguments import call_args\n@@ -121,3 +122,21 @@\n elif isinstance(node, ast.Call):\n return not call_args.get_all_args(node) # we do allow `call()`\n return False\n+\n+\n+def is_unary_minus(node: ast.AST) -> bool:\n+ \"\"\"\n+ Detects if node is unary minus operator.\n+\n+ We use this predicate to allow values\n+ like ``-some_value`` to be overused.\n+\n+ Although negative constants like ``-1``\n+ should raise violation to force naming them.\n+ \"\"\"\n+ if isinstance(node, ast.UnaryOp):\n+ return (\n+ isinstance(node.op, ast.USub) and\n+ not isinstance(node.operand, (Constant, ast.Num))\n+ )\n+ return False\ndiff --git a/wemake_python_styleguide/visitors/ast/complexity/overuses.py b/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n--- a/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n+++ b/wemake_python_styleguide/visitors/ast/complexity/overuses.py\n@@ -101,10 +101,10 @@\n ast.Assert,\n ast.BoolOp,\n ast.BinOp,\n+ ast.UnaryOp,\n ast.Call,\n ast.Compare,\n ast.Subscript,\n- ast.UnaryOp,\n ast.Lambda,\n \n ast.DictComp,\n@@ -124,6 +124,7 @@\n overuses.is_class_context,\n overuses.is_super_call,\n overuses.is_primitive,\n+ overuses.is_unary_minus,\n )\n \n _msg: ClassVar[str] = '{0}; used {1}'\n", "issue": "WPS204: Overused expression should not count unary minus operator\n# Bug report\r\n\r\nUsing unary minus operator often breaks rule WPS204\r\n\r\n## What's wrong\r\n\r\nUsing unary minus operator even with named variable violates rule WPS204.\r\nExample:\r\n```python\r\nmy_value = 5\r\nposition = (0, -my_value)\r\n```\r\n\r\n## How it should be\r\n\r\nNo violation should be raised.\r\n\r\n## System information\r\n\r\n### flake8 information\r\n\r\nContents of `flake8 --bug-report`:\r\n\r\n### pip information\r\n\r\nContents of `pip freeze`:\r\n\n", "before_files": [{"content": "import ast\nfrom collections import defaultdict\nfrom typing import (\n Callable,\n ClassVar,\n DefaultDict,\n FrozenSet,\n List,\n Tuple,\n Union,\n)\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.compat.aliases import FunctionNodes\nfrom wemake_python_styleguide.logic import source, walk\nfrom wemake_python_styleguide.logic.complexity import overuses\nfrom wemake_python_styleguide.types import AnyNodes, AnyText, AnyTextPrimitive\nfrom wemake_python_styleguide.violations import complexity\nfrom wemake_python_styleguide.visitors import base, decorators\n\n#: We use these types to store the number of nodes usage in different contexts.\n_Expressions = DefaultDict[str, List[ast.AST]]\n_FunctionExpressions = DefaultDict[ast.AST, _Expressions]\n_StringConstants = FrozenSet[Union[str, bytes]]\n\n\n@final\[email protected]('visit_any_string', (\n 'visit_Str',\n 'visit_Bytes',\n))\nclass StringOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"\n Restricts repeated usage of the same string constant.\n\n NB: Some short strings are ignored, as their use is very common 
and\n forcing assignment would not make much sense (i.e. newlines or \"\").\n \"\"\"\n\n _ignored_string_constants: ClassVar[_StringConstants] = frozenset((\n ' ',\n '',\n '\\n',\n '\\r\\n',\n '\\t',\n b' ',\n b'',\n b'\\n',\n b'\\r\\n',\n b'\\t',\n ))\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Inits the counter for constants.\"\"\"\n super().__init__(*args, **kwargs)\n self._string_constants: DefaultDict[\n AnyTextPrimitive, int,\n ] = defaultdict(int)\n\n def visit_any_string(self, node: AnyText) -> None:\n \"\"\"\n Restricts to over-use string constants.\n\n Raises:\n OverusedStringViolation\n\n \"\"\"\n self._check_string_constant(node)\n self.generic_visit(node)\n\n def _check_string_constant(self, node: AnyText) -> None:\n if overuses.is_annotation(node):\n return\n\n # Some strings are so common, that it makes no sense to check if\n # they are overused.\n if node.s in self._ignored_string_constants:\n return\n\n self._string_constants[node.s] += 1\n\n def _post_visit(self) -> None:\n for string, usage_count in self._string_constants.items():\n if usage_count > self.options.max_string_usages:\n self.add_violation(\n complexity.OverusedStringViolation(\n text=source.render_string(string) or \"''\",\n baseline=self.options.max_string_usages,\n ),\n )\n\n\n@final\nclass ExpressionOveruseVisitor(base.BaseNodeVisitor):\n \"\"\"Finds overused expressions.\"\"\"\n\n _expressions: ClassVar[AnyNodes] = (\n # We do not treat `ast.Attribute`s as expressions\n # because they are too widely used. That's a compromise.\n ast.Assert,\n ast.BoolOp,\n ast.BinOp,\n ast.Call,\n ast.Compare,\n ast.Subscript,\n ast.UnaryOp,\n ast.Lambda,\n\n ast.DictComp,\n ast.Dict,\n ast.List,\n ast.ListComp,\n ast.Tuple,\n ast.GeneratorExp,\n ast.Set,\n ast.SetComp,\n )\n\n _ignore_predicates: Tuple[Callable[[ast.AST], bool], ...] 
= (\n overuses.is_decorator,\n overuses.is_self,\n overuses.is_annotation,\n overuses.is_class_context,\n overuses.is_super_call,\n overuses.is_primitive,\n )\n\n _msg: ClassVar[str] = '{0}; used {1}'\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"We need to track expression usage in functions and modules.\"\"\"\n super().__init__(*args, **kwargs)\n self._module_expressions: _Expressions = defaultdict(list)\n self._function_expressions: _FunctionExpressions = defaultdict(\n lambda: defaultdict(list),\n )\n\n def visit(self, node: ast.AST) -> None:\n \"\"\"\n Visits all nodes in a module to find overused values.\n\n Raises:\n OverusedExpressionViolation\n\n \"\"\"\n if isinstance(node, self._expressions):\n self._add_expression(node)\n self.generic_visit(node)\n\n def _add_expression(self, node: ast.AST) -> None:\n if any(ignore(node) for ignore in self._ignore_predicates):\n return\n\n source_code = source.node_to_string(node)\n self._module_expressions[source_code].append(node)\n\n maybe_function = walk.get_closest_parent(node, FunctionNodes)\n if maybe_function is not None:\n self._function_expressions[maybe_function][source_code].append(\n node,\n )\n\n def _post_visit(self) -> None:\n for mod_source, module_nodes in self._module_expressions.items():\n if len(module_nodes) > self.options.max_module_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n module_nodes[0],\n text=self._msg.format(mod_source, len(module_nodes)),\n baseline=self.options.max_module_expressions,\n ),\n )\n\n for function_contexts in self._function_expressions.values():\n for src, function_nodes in function_contexts.items():\n if len(function_nodes) > self.options.max_function_expressions:\n self.add_violation(\n complexity.OverusedExpressionViolation(\n function_nodes[0],\n text=self._msg.format(src, len(function_nodes)),\n baseline=self.options.max_function_expressions,\n ),\n )\n", "path": "wemake_python_styleguide/visitors/ast/complexity/overuses.py"}, {"content": "import ast\nfrom typing import Union\n\nfrom wemake_python_styleguide.compat.aliases import FunctionNodes\nfrom wemake_python_styleguide.constants import SPECIAL_ARGUMENT_NAMES_WHITELIST\nfrom wemake_python_styleguide.logic import nodes, walk\nfrom wemake_python_styleguide.logic.arguments import call_args\n\n#: Nodes that can be annotated.\n_AnnNodes = (ast.AnnAssign, ast.arg)\n\n\ndef is_class_context(node: ast.AST) -> bool:\n \"\"\"\n Detects if a node is inside a class context.\n\n We use this predicate because classes have quite complex\n DSL to be created: like django-orm, attrs, and dataclasses.\n And these DSLs are built using attributes and calls.\n \"\"\"\n return isinstance(nodes.get_context(node), ast.ClassDef)\n\n\ndef is_super_call(node: ast.AST) -> bool:\n \"\"\"\n Detects if super is called.\n\n We use this predicate because we can call ``super()`` a lot in our code.\n And it is fine.\n \"\"\"\n if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):\n return node.func.id == 'super'\n return False\n\n\ndef is_decorator(node: ast.AST) -> bool:\n \"\"\"\n Detects if node is used as a decorator.\n\n We use this predicates because decorators can be used miltiple times.\n Like ``@auth_required(login_url=LOGIN_URL)`` and similar.\n \"\"\"\n parent = walk.get_closest_parent(node, FunctionNodes)\n if isinstance(parent, FunctionNodes) and parent.decorator_list:\n return any(\n node == decorator or walk.is_contained_by(node, decorator)\n for decorator in parent.decorator_list\n )\n return 
False\n\n\ndef is_self(node: ast.AST) -> bool:\n \"\"\"\n Detects if node is ``self``, ``cls``, or ``mcs`` call.\n\n We use this predicate because we allow a lot of ``self.method()`` or\n ``self[start:end]`` calls. This is fine.\n\n We do not check for attribute access, because ``ast.Attribute`` nodes\n are globally ignored.\n \"\"\"\n self_node: Union[ast.Attribute, ast.Subscript, None] = None\n if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):\n self_node = node.func\n elif isinstance(node, ast.Subscript):\n self_node = node\n\n return bool(\n self_node and\n isinstance(self_node.value, ast.Name) and\n self_node.value.id in SPECIAL_ARGUMENT_NAMES_WHITELIST,\n )\n\n\ndef is_annotation(node: ast.AST) -> bool:\n \"\"\"\n Detects if node is an annotation. Or a part of it.\n\n We use this predicate to allow all types of repetetive\n function and instance annotations.\n \"\"\"\n if not isinstance(node, (ast.Str, ast.List, ast.Tuple, ast.Subscript)):\n return False\n\n annotated = walk.get_closest_parent(node, (*_AnnNodes, *FunctionNodes))\n if isinstance(annotated, FunctionNodes):\n contains_node = bool(\n annotated.returns and\n walk.is_contained_by(node, annotated.returns),\n )\n return node == annotated.returns or contains_node\n elif isinstance(annotated, _AnnNodes):\n contains_node = bool(\n annotated.annotation and\n walk.is_contained_by(node, annotated.annotation),\n )\n return node == annotated.annotation or contains_node\n return False\n\n\ndef is_primitive(node: ast.AST) -> bool:\n \"\"\"\n Detects if node is a form of a primitive value.\n\n We use this predicate to allow values\n like ``[]`` or ``call()`` to be overused.\n Because you cannot simplify them.\n\n We do not check for strings, numbers, etc\n because they are globally ignored.\n \"\"\"\n if isinstance(node, (ast.Tuple, ast.List)):\n return not node.elts # we do allow `[]` and `()`\n elif isinstance(node, ast.Set):\n return ( # we do allow `{*set_items}`\n len(node.elts) == 1 and\n isinstance(node.elts[0], ast.Starred)\n )\n elif isinstance(node, ast.Dict): # we do allow `{}` and `{**values}`\n return not list(filter(None, node.keys))\n elif isinstance(node, ast.Call):\n return not call_args.get_all_args(node) # we do allow `call()`\n return False\n", "path": "wemake_python_styleguide/logic/complexity/overuses.py"}]} | 3,645 | 546 |
gh_patches_debug_9612 | rasdani/github-patches | git_diff | ray-project__ray-10547 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[dask-on-ray] Set task name to Dask key at task submission time.
### Describe your feature request
To provide better dashboard, metrics, and debugging UX, we should set the task name to the Dask key. The Dask-Ray scheduler [uses a single remote function](https://github.com/ray-project/ray/blob/ef18893fb50c97c2c0d9d7b8b951455683f89635/python/ray/experimental/dask/scheduler.py#L208-L233) for executing all Dask tasks; Dask task functions are all dynamically composed (via partial application and the like), and using this single remote function allows us to cut down on function exporting overhead. However, this means that the default task name (derived from the function descriptor) is the same for every Dask task, which hurts dashboard and debugging UX considerably. By setting the task name to the Dask key at task submission time, we'll be able to attach meaningful semantics to each task that will be displayed in the dashboard and included in task spec log records.
### Blockers
Blocked by https://github.com/ray-project/ray/issues/10371.
</issue>
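For context, a minimal sketch of the mechanism the issue asks for — overriding a Ray task's name at submission time via `.options(name=...)`. The remote function, the fake Dask key, and the `dask:` prefix below are illustrative assumptions, not code from this repository:

``` python
import ray

ray.init()

@ray.remote
def add(a, b):
    # Stand-in for the single shared Dask task wrapper described in the issue.
    return a + b

# Hypothetical Dask graph key, used only for illustration.
key = ("add-1a2b3c", 0)

# Name the submitted task after the Dask key so the dashboard and task-spec
# log records show something meaningful instead of the generic wrapper name.
ref = add.options(name=f"dask:{key!s}").remote(1, 2)
print(ray.get(ref))  # 3
```

The patch recorded later in this record applies the same `.options(name=...)` call to `dask_task_wrapper` inside `_rayify_task`.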
<code>
[start of python/ray/experimental/dask/scheduler.py]
1 import atexit
2 from collections import defaultdict
3 from multiprocessing.pool import ThreadPool
4 import threading
5
6 import ray
7
8 from dask.core import istask, ishashable, _execute_task
9 from dask.local import get_async, apply_sync
10 from dask.system import CPU_COUNT
11 from dask.threaded import pack_exception, _thread_get_id
12
13 from .common import unpack_object_refs
14
15 main_thread = threading.current_thread()
16 default_pool = None
17 pools = defaultdict(dict)
18 pools_lock = threading.Lock()
19
20
21 def ray_dask_get(dsk, keys, **kwargs):
22 """
23 A Dask-Ray scheduler. This scheduler will send top-level (non-inlined) Dask
24 tasks to a Ray cluster for execution. The scheduler will wait for the
25 tasks to finish executing, fetch the results, and repackage them into the
26 appropriate Dask collections. This particular scheduler uses a threadpool
27 to submit Ray tasks.
28
29 This can be passed directly to `dask.compute()`, as the scheduler:
30
31 >>> dask.compute(obj, scheduler=ray_dask_get)
32
33 You can override the number of threads to use when submitting the
34 Ray tasks, or the threadpool used to submit Ray tasks:
35
36 >>> dask.compute(
37 obj,
38 scheduler=ray_dask_get,
39 num_workers=8,
40 pool=some_cool_pool,
41 )
42
43 Args:
44 dsk (Dict): Dask graph, represented as a task DAG dictionary.
45 keys (List[str]): List of Dask graph keys whose values we wish to
46 compute and return.
47 num_workers (Optional[int]): The number of worker threads to use in
48 the Ray task submission traversal of the Dask graph.
49 pool (Optional[ThreadPool]): A multiprocessing threadpool to use to
50 submit Ray tasks.
51
52 Returns:
53 Computed values corresponding to the provided keys.
54 """
55 num_workers = kwargs.pop("num_workers", None)
56 pool = kwargs.pop("pool", None)
57 # We attempt to reuse any other thread pools that have been created within
58 # this thread and with the given number of workers. We reuse a global
59 # thread pool if num_workers is not given and we're in the main thread.
60 global default_pool
61 thread = threading.current_thread()
62 if pool is None:
63 with pools_lock:
64 if num_workers is None and thread is main_thread:
65 if default_pool is None:
66 default_pool = ThreadPool(CPU_COUNT)
67 atexit.register(default_pool.close)
68 pool = default_pool
69 elif thread in pools and num_workers in pools[thread]:
70 pool = pools[thread][num_workers]
71 else:
72 pool = ThreadPool(num_workers)
73 atexit.register(pool.close)
74 pools[thread][num_workers] = pool
75
76 # NOTE: We hijack Dask's `get_async` function, injecting a different task
77 # executor.
78 object_refs = get_async(
79 _apply_async_wrapper(pool.apply_async, _rayify_task_wrapper),
80 len(pool._pool),
81 dsk,
82 keys,
83 get_id=_thread_get_id,
84 pack_exception=pack_exception,
85 **kwargs,
86 )
87 # NOTE: We explicitly delete the Dask graph here so object references
88 # are garbage-collected before this function returns, i.e. before all Ray
89 # tasks are done. Otherwise, no intermediate objects will be cleaned up
90 # until all Ray tasks are done.
91 del dsk
92 result = ray_get_unpack(object_refs)
93
94 # cleanup pools associated with dead threads.
95 with pools_lock:
96 active_threads = set(threading.enumerate())
97 if thread is not main_thread:
98 for t in list(pools):
99 if t not in active_threads:
100 for p in pools.pop(t).values():
101 p.close()
102 return result
103
104
105 def _apply_async_wrapper(apply_async, real_func, *extra_args, **extra_kwargs):
106 """
107 Wraps the given pool `apply_async` function, hotswapping `real_func` in as
108 the function to be applied and adding `extra_args` and `extra_kwargs` to
109 `real_func`'s call.
110
111 Args:
112 apply_async (callable): The pool function to be wrapped.
113 real_func (callable): The real function that we wish the pool apply
114 function to execute.
115 *extra_args: Extra positional arguments to pass to the `real_func`.
116 **extra_kwargs: Extra keyword arguments to pass to the `real_func`.
117
118 Returns:
119 A wrapper function that will ignore it's first `func` argument and
120 pass `real_func` in its place. To be passed to `dask.local.get_async`.
121 """
122
123 def wrapper(func, args=(), kwds={}, callback=None): # noqa: M511
124 return apply_async(
125 real_func,
126 args=args + extra_args,
127 kwds=dict(kwds, **extra_kwargs),
128 callback=callback,
129 )
130
131 return wrapper
132
133
134 def _rayify_task_wrapper(
135 key,
136 task_info,
137 dumps,
138 loads,
139 get_id,
140 pack_exception,
141 ):
142 """
143 The core Ray-Dask task execution wrapper, to be given to the thread pool's
144 `apply_async` function. Exactly the same as `execute_task`, except that it
145 calls `_rayify_task` on the task instead of `_execute_task`.
146
147 Args:
148 key (str): The Dask graph key whose corresponding task we wish to
149 execute.
150 task_info: The task to execute and its dependencies.
151 dumps (callable): A result serializing function.
152 loads (callable): A task_info deserializing function.
153 get_id (callable): An ID generating function.
154 pack_exception (callable): An exception serializing function.
155
156 Returns:
157 A 3-tuple of the task's key, a literal or a Ray object reference for a
158 Ray task's result, and whether the Ray task submission failed.
159 """
160 try:
161 task, deps = loads(task_info)
162 result = _rayify_task(task, key, deps)
163 id = get_id()
164 result = dumps((result, id))
165 failed = False
166 except BaseException as e:
167 result = pack_exception(e, dumps)
168 failed = True
169 return key, result, failed
170
171
172 def _rayify_task(task, key, deps):
173 """
174 Rayifies the given task, submitting it as a Ray task to the Ray cluster.
175
176 Args:
177 task: A Dask graph value, being either a literal, dependency key, Dask
178 task, or a list thereof.
179 key: The Dask graph key for the given task.
180 deps: The dependencies of this task.
181
182 Returns:
183 A literal, a Ray object reference representing a submitted task, or a
184 list thereof.
185 """
186 if isinstance(task, list):
187 # Recursively rayify this list. This will still bottom out at the first
188 # actual task encountered, inlining any tasks in that task's arguments.
189 return [_rayify_task(t, deps) for t in task]
190 elif istask(task):
191 # Unpacks and repacks Ray object references and submits the task to the
192 # Ray cluster for execution.
193 func, args = task[0], task[1:]
194 # If the function's arguments contain nested object references, we must
195 # unpack said object references into a flat set of arguments so that
196 # Ray properly tracks the object dependencies between Ray tasks.
197 object_refs, repack = unpack_object_refs(args, deps)
198 # Submit the task using a wrapper function.
199 return dask_task_wrapper.remote(func, repack, *object_refs)
200 elif not ishashable(task):
201 return task
202 elif task in deps:
203 return deps[task]
204 else:
205 return task
206
207
208 @ray.remote
209 def dask_task_wrapper(func, repack, *args):
210 """
211 A Ray remote function acting as a Dask task wrapper. This function will
212 repackage the given flat `args` into its original data structures using
213 `repack`, execute any Dask subtasks within the repackaged arguments
214 (inlined by Dask's optimization pass), and then pass the concrete task
215 arguments to the provide Dask task function, `func`.
216
217 Args:
218 func (callable): The Dask task function to execute.
219 repack (callable): A function that repackages the provided args into
220 the original (possibly nested) Python objects.
221 *args (ObjectRef): Ray object references representing the Dask task's
222 arguments.
223
224 Returns:
225 The output of the Dask task. In the context of Ray, a
226 dask_task_wrapper.remote() invocation will return a Ray object
227 reference representing the Ray task's result.
228 """
229 repacked_args, repacked_deps = repack(args)
230 # Recursively execute Dask-inlined tasks.
231 actual_args = [_execute_task(a, repacked_deps) for a in repacked_args]
232 # Execute the actual underlying Dask task.
233 return func(*actual_args)
234
235
236 def ray_get_unpack(object_refs):
237 """
238 Unpacks object references, gets the object references, and repacks.
239 Traverses arbitrary data structures.
240
241 Args:
242 object_refs: A (potentially nested) Python object containing Ray object
243 references.
244
245 Returns:
246 The input Python object with all contained Ray object references
247 resolved with their concrete values.
248 """
249 if isinstance(object_refs,
250 (tuple, list)) and any(not isinstance(x, ray.ObjectRef)
251 for x in object_refs):
252 # We flatten the object references before calling ray.get(), since Dask
253 # loves to nest collections in nested tuples and Ray expects a flat
254 # list of object references. We repack the results after ray.get()
255 # completes.
256 object_refs, repack = unpack_object_refs(*object_refs)
257 computed_result = ray.get(object_refs)
258 return repack(computed_result)
259 else:
260 return ray.get(object_refs)
261
262
263 def ray_dask_get_sync(dsk, keys, **kwargs):
264 """
265 A synchronous Dask-Ray scheduler. This scheduler will send top-level
266 (non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will
267 wait for the tasks to finish executing, fetch the results, and repackage
268 them into the appropriate Dask collections. This particular scheduler
269 submits Ray tasks synchronously, which can be useful for debugging.
270
271 This can be passed directly to `dask.compute()`, as the scheduler:
272
273 >>> dask.compute(obj, scheduler=ray_dask_get_sync)
274
275 Args:
276 dsk (Dict): Dask graph, represented as a task DAG dictionary.
277 keys (List[str]): List of Dask graph keys whose values we wish to
278 compute and return.
279
280 Returns:
281 Computed values corresponding to the provided keys.
282 """
283 # NOTE: We hijack Dask's `get_async` function, injecting a different task
284 # executor.
285 object_refs = get_async(
286 _apply_async_wrapper(apply_sync, _rayify_task_wrapper),
287 1,
288 dsk,
289 keys,
290 **kwargs,
291 )
292 # NOTE: We explicitly delete the Dask graph here so object references
293 # are garbage-collected before this function returns, i.e. before all Ray
294 # tasks are done. Otherwise, no intermediate objects will be cleaned up
295 # until all Ray tasks are done.
296 del dsk
297 return ray_get_unpack(object_refs)
298
[end of python/ray/experimental/dask/scheduler.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/python/ray/experimental/dask/scheduler.py b/python/ray/experimental/dask/scheduler.py
--- a/python/ray/experimental/dask/scheduler.py
+++ b/python/ray/experimental/dask/scheduler.py
@@ -196,7 +196,8 @@
# Ray properly tracks the object dependencies between Ray tasks.
object_refs, repack = unpack_object_refs(args, deps)
# Submit the task using a wrapper function.
- return dask_task_wrapper.remote(func, repack, *object_refs)
+ return dask_task_wrapper.options(name=f"dask:{key!s}").remote(
+ func, repack, *object_refs)
elif not ishashable(task):
return task
elif task in deps:
| {"golden_diff": "diff --git a/python/ray/experimental/dask/scheduler.py b/python/ray/experimental/dask/scheduler.py\n--- a/python/ray/experimental/dask/scheduler.py\n+++ b/python/ray/experimental/dask/scheduler.py\n@@ -196,7 +196,8 @@\n # Ray properly tracks the object dependencies between Ray tasks.\n object_refs, repack = unpack_object_refs(args, deps)\n # Submit the task using a wrapper function.\n- return dask_task_wrapper.remote(func, repack, *object_refs)\n+ return dask_task_wrapper.options(name=f\"dask:{key!s}\").remote(\n+ func, repack, *object_refs)\n elif not ishashable(task):\n return task\n elif task in deps:\n", "issue": "[dask-on-ray] Set task name to Dask key at task submission time.\n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n\r\n### Describe your feature request\r\n\r\nTo provide better dashboard, metrics, and debugging UX, we should set the task name to the Dask key. The Dask-Ray scheduler [uses a single remote function](https://github.com/ray-project/ray/blob/ef18893fb50c97c2c0d9d7b8b951455683f89635/python/ray/experimental/dask/scheduler.py#L208-L233) for executing all Dask tasks; Dask task functions are all dynamically composed (via partial application and the like), and using this single remote function allows us to cut down on function exporting overhead. However, this means that the default task name (derived from the function descriptor) is the same for every Dask task, which hurts dashboard and debugging UX considerably. By setting the task name to the Dask key at task submission time, we'll be able to attach meaningful semantics to each task that will be displayed in the dashboard and included in task spec log records.\r\n\r\n### Blockers\r\n\r\nBlocked by https://github.com/ray-project/ray/issues/10371.\n", "before_files": [{"content": "import atexit\nfrom collections import defaultdict\nfrom multiprocessing.pool import ThreadPool\nimport threading\n\nimport ray\n\nfrom dask.core import istask, ishashable, _execute_task\nfrom dask.local import get_async, apply_sync\nfrom dask.system import CPU_COUNT\nfrom dask.threaded import pack_exception, _thread_get_id\n\nfrom .common import unpack_object_refs\n\nmain_thread = threading.current_thread()\ndefault_pool = None\npools = defaultdict(dict)\npools_lock = threading.Lock()\n\n\ndef ray_dask_get(dsk, keys, **kwargs):\n \"\"\"\n A Dask-Ray scheduler. This scheduler will send top-level (non-inlined) Dask\n tasks to a Ray cluster for execution. The scheduler will wait for the\n tasks to finish executing, fetch the results, and repackage them into the\n appropriate Dask collections. 
This particular scheduler uses a threadpool\n to submit Ray tasks.\n\n This can be passed directly to `dask.compute()`, as the scheduler:\n\n >>> dask.compute(obj, scheduler=ray_dask_get)\n\n You can override the number of threads to use when submitting the\n Ray tasks, or the threadpool used to submit Ray tasks:\n\n >>> dask.compute(\n obj,\n scheduler=ray_dask_get,\n num_workers=8,\n pool=some_cool_pool,\n )\n\n Args:\n dsk (Dict): Dask graph, represented as a task DAG dictionary.\n keys (List[str]): List of Dask graph keys whose values we wish to\n compute and return.\n num_workers (Optional[int]): The number of worker threads to use in\n the Ray task submission traversal of the Dask graph.\n pool (Optional[ThreadPool]): A multiprocessing threadpool to use to\n submit Ray tasks.\n\n Returns:\n Computed values corresponding to the provided keys.\n \"\"\"\n num_workers = kwargs.pop(\"num_workers\", None)\n pool = kwargs.pop(\"pool\", None)\n # We attempt to reuse any other thread pools that have been created within\n # this thread and with the given number of workers. We reuse a global\n # thread pool if num_workers is not given and we're in the main thread.\n global default_pool\n thread = threading.current_thread()\n if pool is None:\n with pools_lock:\n if num_workers is None and thread is main_thread:\n if default_pool is None:\n default_pool = ThreadPool(CPU_COUNT)\n atexit.register(default_pool.close)\n pool = default_pool\n elif thread in pools and num_workers in pools[thread]:\n pool = pools[thread][num_workers]\n else:\n pool = ThreadPool(num_workers)\n atexit.register(pool.close)\n pools[thread][num_workers] = pool\n\n # NOTE: We hijack Dask's `get_async` function, injecting a different task\n # executor.\n object_refs = get_async(\n _apply_async_wrapper(pool.apply_async, _rayify_task_wrapper),\n len(pool._pool),\n dsk,\n keys,\n get_id=_thread_get_id,\n pack_exception=pack_exception,\n **kwargs,\n )\n # NOTE: We explicitly delete the Dask graph here so object references\n # are garbage-collected before this function returns, i.e. before all Ray\n # tasks are done. Otherwise, no intermediate objects will be cleaned up\n # until all Ray tasks are done.\n del dsk\n result = ray_get_unpack(object_refs)\n\n # cleanup pools associated with dead threads.\n with pools_lock:\n active_threads = set(threading.enumerate())\n if thread is not main_thread:\n for t in list(pools):\n if t not in active_threads:\n for p in pools.pop(t).values():\n p.close()\n return result\n\n\ndef _apply_async_wrapper(apply_async, real_func, *extra_args, **extra_kwargs):\n \"\"\"\n Wraps the given pool `apply_async` function, hotswapping `real_func` in as\n the function to be applied and adding `extra_args` and `extra_kwargs` to\n `real_func`'s call.\n\n Args:\n apply_async (callable): The pool function to be wrapped.\n real_func (callable): The real function that we wish the pool apply\n function to execute.\n *extra_args: Extra positional arguments to pass to the `real_func`.\n **extra_kwargs: Extra keyword arguments to pass to the `real_func`.\n\n Returns:\n A wrapper function that will ignore it's first `func` argument and\n pass `real_func` in its place. 
To be passed to `dask.local.get_async`.\n \"\"\"\n\n def wrapper(func, args=(), kwds={}, callback=None): # noqa: M511\n return apply_async(\n real_func,\n args=args + extra_args,\n kwds=dict(kwds, **extra_kwargs),\n callback=callback,\n )\n\n return wrapper\n\n\ndef _rayify_task_wrapper(\n key,\n task_info,\n dumps,\n loads,\n get_id,\n pack_exception,\n):\n \"\"\"\n The core Ray-Dask task execution wrapper, to be given to the thread pool's\n `apply_async` function. Exactly the same as `execute_task`, except that it\n calls `_rayify_task` on the task instead of `_execute_task`.\n\n Args:\n key (str): The Dask graph key whose corresponding task we wish to\n execute.\n task_info: The task to execute and its dependencies.\n dumps (callable): A result serializing function.\n loads (callable): A task_info deserializing function.\n get_id (callable): An ID generating function.\n pack_exception (callable): An exception serializing function.\n\n Returns:\n A 3-tuple of the task's key, a literal or a Ray object reference for a\n Ray task's result, and whether the Ray task submission failed.\n \"\"\"\n try:\n task, deps = loads(task_info)\n result = _rayify_task(task, key, deps)\n id = get_id()\n result = dumps((result, id))\n failed = False\n except BaseException as e:\n result = pack_exception(e, dumps)\n failed = True\n return key, result, failed\n\n\ndef _rayify_task(task, key, deps):\n \"\"\"\n Rayifies the given task, submitting it as a Ray task to the Ray cluster.\n\n Args:\n task: A Dask graph value, being either a literal, dependency key, Dask\n task, or a list thereof.\n key: The Dask graph key for the given task.\n deps: The dependencies of this task.\n\n Returns:\n A literal, a Ray object reference representing a submitted task, or a\n list thereof.\n \"\"\"\n if isinstance(task, list):\n # Recursively rayify this list. This will still bottom out at the first\n # actual task encountered, inlining any tasks in that task's arguments.\n return [_rayify_task(t, deps) for t in task]\n elif istask(task):\n # Unpacks and repacks Ray object references and submits the task to the\n # Ray cluster for execution.\n func, args = task[0], task[1:]\n # If the function's arguments contain nested object references, we must\n # unpack said object references into a flat set of arguments so that\n # Ray properly tracks the object dependencies between Ray tasks.\n object_refs, repack = unpack_object_refs(args, deps)\n # Submit the task using a wrapper function.\n return dask_task_wrapper.remote(func, repack, *object_refs)\n elif not ishashable(task):\n return task\n elif task in deps:\n return deps[task]\n else:\n return task\n\n\[email protected]\ndef dask_task_wrapper(func, repack, *args):\n \"\"\"\n A Ray remote function acting as a Dask task wrapper. This function will\n repackage the given flat `args` into its original data structures using\n `repack`, execute any Dask subtasks within the repackaged arguments\n (inlined by Dask's optimization pass), and then pass the concrete task\n arguments to the provide Dask task function, `func`.\n\n Args:\n func (callable): The Dask task function to execute.\n repack (callable): A function that repackages the provided args into\n the original (possibly nested) Python objects.\n *args (ObjectRef): Ray object references representing the Dask task's\n arguments.\n\n Returns:\n The output of the Dask task. 
In the context of Ray, a\n dask_task_wrapper.remote() invocation will return a Ray object\n reference representing the Ray task's result.\n \"\"\"\n repacked_args, repacked_deps = repack(args)\n # Recursively execute Dask-inlined tasks.\n actual_args = [_execute_task(a, repacked_deps) for a in repacked_args]\n # Execute the actual underlying Dask task.\n return func(*actual_args)\n\n\ndef ray_get_unpack(object_refs):\n \"\"\"\n Unpacks object references, gets the object references, and repacks.\n Traverses arbitrary data structures.\n\n Args:\n object_refs: A (potentially nested) Python object containing Ray object\n references.\n\n Returns:\n The input Python object with all contained Ray object references\n resolved with their concrete values.\n \"\"\"\n if isinstance(object_refs,\n (tuple, list)) and any(not isinstance(x, ray.ObjectRef)\n for x in object_refs):\n # We flatten the object references before calling ray.get(), since Dask\n # loves to nest collections in nested tuples and Ray expects a flat\n # list of object references. We repack the results after ray.get()\n # completes.\n object_refs, repack = unpack_object_refs(*object_refs)\n computed_result = ray.get(object_refs)\n return repack(computed_result)\n else:\n return ray.get(object_refs)\n\n\ndef ray_dask_get_sync(dsk, keys, **kwargs):\n \"\"\"\n A synchronous Dask-Ray scheduler. This scheduler will send top-level\n (non-inlined) Dask tasks to a Ray cluster for execution. The scheduler will\n wait for the tasks to finish executing, fetch the results, and repackage\n them into the appropriate Dask collections. This particular scheduler\n submits Ray tasks synchronously, which can be useful for debugging.\n\n This can be passed directly to `dask.compute()`, as the scheduler:\n\n >>> dask.compute(obj, scheduler=ray_dask_get_sync)\n\n Args:\n dsk (Dict): Dask graph, represented as a task DAG dictionary.\n keys (List[str]): List of Dask graph keys whose values we wish to\n compute and return.\n\n Returns:\n Computed values corresponding to the provided keys.\n \"\"\"\n # NOTE: We hijack Dask's `get_async` function, injecting a different task\n # executor.\n object_refs = get_async(\n _apply_async_wrapper(apply_sync, _rayify_task_wrapper),\n 1,\n dsk,\n keys,\n **kwargs,\n )\n # NOTE: We explicitly delete the Dask graph here so object references\n # are garbage-collected before this function returns, i.e. before all Ray\n # tasks are done. Otherwise, no intermediate objects will be cleaned up\n # until all Ray tasks are done.\n del dsk\n return ray_get_unpack(object_refs)\n", "path": "python/ray/experimental/dask/scheduler.py"}]} | 4,094 | 168 |
gh_patches_debug_43179 | rasdani/github-patches | git_diff | Pyomo__pyomo-1596 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Scaling transformation does not work with References
References break the scaling transformation because it renames the underlying components, then tries to scale the now-broken `Reference` components. Some options:
1. Rename all components (breaking references), then don't try to scale references
2. Don't rename components
3. Create new, renamed components but don't delete the old components
I think I'm in favor of option 1 for now as long as we document that the scaling transformation breaks references. Maybe we can have a `rename_scaled_components` option?
</issue>
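For context, a minimal sketch of a model that exercises the problem — the component names, sizes, and scaling factor below are illustrative assumptions only. The point is that `m.r` exposes the same variable data as `m.x`, so renaming `m.x` leaves `m.r` referring to data whose recorded name no longer resolves, and a naive pass over all Var components would also touch that data twice:

``` python
from pyomo.environ import (ConcreteModel, Objective, Reference, Suffix,
                           TransformationFactory, Var)

m = ConcreteModel()
m.x = Var([1, 2], initialize=1.0)
m.r = Reference(m.x)                 # m.r[i] aliases the same data as m.x[i]
m.obj = Objective(expr=m.x[1] + m.x[2])

m.scaling_factor = Suffix(direction=Suffix.EXPORT)
m.scaling_factor[m.x] = 10.0

# Before the patch recorded later in this record, this call is where the
# transformation trips over the Reference component created above.
TransformationFactory('core.scale_model').apply_to(m)
```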
<code>
[start of pyomo/core/plugins/transform/scaling.py]
1 # ___________________________________________________________________________
2 #
3 # Pyomo: Python Optimization Modeling Objects
4 # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
5 # Under the terms of Contract DE-NA0003525 with National Technology and
6 # Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
7 # rights in this software.
8 # This software is distributed under the 3-clause BSD License.
9 # ___________________________________________________________________________
10
11 from pyomo.common.collections import ComponentMap
12 from pyomo.core.base import Var, Constraint, Objective, _ConstraintData, _ObjectiveData, Suffix, value
13 from pyomo.core.plugins.transform.hierarchy import Transformation
14 from pyomo.core.base import TransformationFactory
15 from pyomo.core.expr.current import replace_expressions
16 from pyomo.util.components import rename_components
17
18
19 @TransformationFactory.register('core.scale_model',
20 doc="Scale model variables, constraints, and objectives.")
21 class ScaleModel(Transformation):
22 """
23 Transformation to scale a model.
24
25 This plugin performs variable, constraint, and objective scaling on
26 a model based on the scaling factors in the suffix 'scaling_parameter'
27 set for the variables, constraints, and/or objective. This is typically
28 done to scale the problem for improved numerical properties.
29
30 Supported transformation methods:
31 * :py:meth:`apply_to <pyomo.core.plugins.transform.scaling.ScaleModel.apply_to>`
32 * :py:meth:`create_using <pyomo.core.plugins.transform.scaling.ScaleModel.create_using>`
33 * :py:meth:`propagate_solution <pyomo.core.plugins.transform.scaling.ScaleModel.propagate_solution>`
34
35
36 Examples
37 --------
38
39 .. doctest::
40
41 >>> from pyomo.environ import *
42 >>> # create the model
43 >>> model = ConcreteModel()
44 >>> model.x = Var(bounds=(-5, 5), initialize=1.0)
45 >>> model.y = Var(bounds=(0, 1), initialize=1.0)
46 >>> model.obj = Objective(expr=1e8*model.x + 1e6*model.y)
47 >>> model.con = Constraint(expr=model.x + model.y == 1.0)
48 >>> # create the scaling factors
49 >>> model.scaling_factor = Suffix(direction=Suffix.EXPORT)
50 >>> model.scaling_factor[model.obj] = 1e-6 # scale the objective
51 >>> model.scaling_factor[model.con] = 2.0 # scale the constraint
52 >>> model.scaling_factor[model.x] = 0.2 # scale the x variable
53 >>> # transform the model
54 >>> scaled_model = TransformationFactory('core.scale_model').create_using(model)
55 >>> # print the value of the objective function to show scaling has occurred
56 >>> print(value(model.x))
57 1.0
58 >>> print(value(scaled_model.scaled_x))
59 0.2
60 >>> print(value(scaled_model.scaled_x.lb))
61 -1.0
62 >>> print(value(model.obj))
63 101000000.0
64 >>> print(value(scaled_model.scaled_obj))
65 101.0
66
67 ToDo
68 ====
69 - implement an option to change the variables names or not
70
71 """
72
73 def __init__(self, **kwds):
74 kwds['name'] = "scale_model"
75 self._scaling_method = kwds.pop('scaling_method', 'user')
76 super(ScaleModel, self).__init__(**kwds)
77
78 def _create_using(self, original_model, **kwds):
79 scaled_model = original_model.clone()
80 self._apply_to(scaled_model, **kwds)
81 return scaled_model
82
83 def _get_float_scaling_factor(self, instance, component_data):
84 scaling_factor = None
85 if component_data in instance.scaling_factor:
86 scaling_factor = instance.scaling_factor[component_data]
87 elif component_data.parent_component() in instance.scaling_factor:
88 scaling_factor = instance.scaling_factor[component_data.parent_component()]
89
90 if scaling_factor is None:
91 return 1.0
92
93 try:
94 scaling_factor = float(scaling_factor)
95 except ValueError:
96 raise ValueError(
97 "Suffix 'scaling_factor' has a value %s for component %s that cannot be converted to a float. "
98 "Floating point values are required for this suffix in the ScaleModel transformation."
99 % (scaling_factor, component_data))
100 return scaling_factor
101
102 def _apply_to(self, model, **kwds):
103 # create a map of component to scaling factor
104 component_scaling_factor_map = ComponentMap()
105
106 # if the scaling_method is 'user', get the scaling parameters from the suffixes
107 if self._scaling_method == 'user':
108 # perform some checks to make sure we have the necessary suffixes
109 if type(model.component('scaling_factor')) is not Suffix:
110 raise ValueError("ScaleModel transformation called with scaling_method='user'"
111 ", but cannot find the suffix 'scaling_factor' on the model")
112
113 # get the scaling factors
114 for c in model.component_data_objects(ctype=(Var, Constraint, Objective), descend_into=True):
115 component_scaling_factor_map[c] = self._get_float_scaling_factor(model, c)
116 else:
117 raise ValueError("ScaleModel transformation: unknown scaling_method found"
118 "-- supported values: 'user' ")
119
120 # rename all the Vars, Constraints, and Objectives from foo to scaled_foo
121 scaled_component_to_original_name_map = \
122 rename_components(model=model,
123 component_list=list(model.component_objects(ctype=[Var, Constraint, Objective])),
124 prefix='scaled_')
125
126 # scale the variable bounds and values and build the variable substitution map
127 # for scaling vars in constraints
128 variable_substitution_map = ComponentMap()
129 for variable in [var for var in model.component_objects(ctype=Var, descend_into=True)]:
130 # set the bounds/value for the scaled variable
131 for k in variable:
132 v = variable[k]
133 scaling_factor = component_scaling_factor_map[v]
134 variable_substitution_map[v] = v / scaling_factor
135
136 if v.lb is not None:
137 v.setlb(v.lb * scaling_factor)
138 if v.ub is not None:
139 v.setub(v.ub * scaling_factor)
140 if scaling_factor < 0:
141 temp = v.lb
142 v.setlb(v.ub)
143 v.setub(temp)
144
145 if v.value is not None:
146 v.value = value(v) * scaling_factor
147
148 # scale the objectives/constraints and perform the scaled variable substitution
149 scale_constraint_dual = False
150 if type(model.component('dual')) is Suffix:
151 scale_constraint_dual = True
152
153 # translate the variable_substitution_map (ComponentMap)
154 # to variable_substition_dict (key: id() of component)
155 # ToDo: We should change replace_expressions to accept a ComponentMap as well
156 variable_substitution_dict = {id(k):variable_substitution_map[k]
157 for k in variable_substitution_map}
158
159 for component in model.component_objects(ctype=(Constraint, Objective), descend_into=True):
160 for k in component:
161 c = component[k]
162 # perform the constraint/objective scaling and variable sub
163 scaling_factor = component_scaling_factor_map[c]
164 if isinstance(c, _ConstraintData):
165 body = scaling_factor * \
166 replace_expressions(expr=c.body,
167 substitution_map=variable_substitution_dict,
168 descend_into_named_expressions=True,
169 remove_named_expressions=True)
170
171 # scale the rhs
172 if c._lower is not None:
173 c._lower = c._lower * scaling_factor
174 if c._upper is not None:
175 c._upper = c._upper * scaling_factor
176
177 if scaling_factor < 0:
178 c._lower, c._upper = c._upper, c._lower
179
180 if scale_constraint_dual and c in model.dual:
181 dual_value = model.dual[c]
182 if dual_value is not None:
183 model.dual[c] = dual_value / scaling_factor
184
185 c.set_value((c._lower, body, c._upper))
186
187 elif isinstance(c, _ObjectiveData):
188 c.expr = scaling_factor * \
189 replace_expressions(expr=c.expr,
190 substitution_map=variable_substitution_dict,
191 descend_into_named_expressions=True,
192 remove_named_expressions=True)
193 else:
194 raise NotImplementedError(
195 'Unknown object type found when applying scaling factors in ScaleModel transformation - Internal Error')
196
197 model.component_scaling_factor_map = component_scaling_factor_map
198 model.scaled_component_to_original_name_map = scaled_component_to_original_name_map
199
200 return model
201
202 def propagate_solution(self, scaled_model, original_model):
203 """
204 This method takes the solution in scaled_model and maps it back to the original model.
205
206 It will also transform duals and reduced costs if the suffixes 'dual' and/or 'rc' are present.
207 The :code:`scaled_model` argument must be a model that was already scaled using this transformation
208 as it expects data from the transformation to perform the back mapping.
209
210 Parameters
211 ----------
212 scaled_model : Pyomo Model
213 The model that was previously scaled with this transformation
214 original_model : Pyomo Model
215 The original unscaled source model
216
217 """
218 if not hasattr(scaled_model, 'component_scaling_factor_map'):
219 raise AttributeError('ScaleModel:propagate_solution called with scaled_model that does not '
220 'have a component_scaling_factor_map. It is possible this method was called '
221 'using a model that was not scaled with the ScaleModel transformation')
222 if not hasattr(scaled_model, 'scaled_component_to_original_name_map'):
223 raise AttributeError('ScaleModel:propagate_solution called with scaled_model that does not '
224 'have a scaled_component_to_original_name_map. It is possible this method was called '
225 'using a model that was not scaled with the ScaleModel transformation')
226
227 component_scaling_factor_map = scaled_model.component_scaling_factor_map
228 scaled_component_to_original_name_map = scaled_model.scaled_component_to_original_name_map
229
230 # get the objective scaling factor
231 scaled_objectives = list(scaled_model.component_data_objects(ctype=Objective, active=True, descend_into=True))
232 if len(scaled_objectives) != 1:
233 raise NotImplementedError(
234 'ScaleModel.propagate_solution requires a single active objective function, but %d objectives found.' % (
235 len(objectives)))
236 objective_scaling_factor = component_scaling_factor_map[scaled_objectives[0]]
237
238 # transfer the variable values and reduced costs
239 check_reduced_costs = type(scaled_model.component('rc')) is Suffix
240 for scaled_v in scaled_model.component_objects(ctype=Var, descend_into=True):
241 # get the unscaled_v from the original model
242 original_v_path = scaled_component_to_original_name_map[scaled_v]
243 original_v = original_model.find_component(original_v_path)
244
245 for k in scaled_v:
246 original_v[k].value = value(scaled_v[k]) / component_scaling_factor_map[scaled_v[k]]
247 if check_reduced_costs and scaled_v[k] in scaled_model.rc:
248 original_model.rc[original_v[k]] = scaled_model.rc[scaled_v[k]] * component_scaling_factor_map[
249 scaled_v[k]] / objective_scaling_factor
250
251 # transfer the duals
252 if type(scaled_model.component('dual')) is Suffix and type(original_model.component('dual')) is Suffix:
253 for scaled_c in scaled_model.component_objects(ctype=Constraint, descend_into=True):
254 original_c = original_model.find_component(scaled_component_to_original_name_map[scaled_c])
255
256 for k in scaled_c:
257 original_model.dual[original_c[k]] = scaled_model.dual[scaled_c[k]] * component_scaling_factor_map[
258 scaled_c[k]] / objective_scaling_factor
259
[end of pyomo/core/plugins/transform/scaling.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pyomo/core/plugins/transform/scaling.py b/pyomo/core/plugins/transform/scaling.py
--- a/pyomo/core/plugins/transform/scaling.py
+++ b/pyomo/core/plugins/transform/scaling.py
@@ -99,7 +99,7 @@
% (scaling_factor, component_data))
return scaling_factor
- def _apply_to(self, model, **kwds):
+ def _apply_to(self, model, rename=True):
# create a map of component to scaling factor
component_scaling_factor_map = ComponentMap()
@@ -117,19 +117,34 @@
raise ValueError("ScaleModel transformation: unknown scaling_method found"
"-- supported values: 'user' ")
- # rename all the Vars, Constraints, and Objectives from foo to scaled_foo
- scaled_component_to_original_name_map = \
- rename_components(model=model,
- component_list=list(model.component_objects(ctype=[Var, Constraint, Objective])),
- prefix='scaled_')
+ if rename:
+ # rename all the Vars, Constraints, and Objectives
+ # from foo to scaled_foo
+ component_list = list(model.component_objects(
+ ctype=[Var, Constraint, Objective]))
+ scaled_component_to_original_name_map = rename_components(
+ model=model,
+ component_list=component_list,
+ prefix='scaled_',
+ )
+ else:
+ scaled_component_to_original_name_map = ComponentMap(
+ [(comp, comp.name) for comp in
+ model.component_objects(
+ ctype=[Var,Constraint, Objective])]
+ )
# scale the variable bounds and values and build the variable substitution map
# for scaling vars in constraints
variable_substitution_map = ComponentMap()
+ already_scaled = set()
for variable in [var for var in model.component_objects(ctype=Var, descend_into=True)]:
# set the bounds/value for the scaled variable
for k in variable:
v = variable[k]
+ if id(v) in already_scaled:
+ continue
+ already_scaled.add(id(v))
scaling_factor = component_scaling_factor_map[v]
variable_substitution_map[v] = v / scaling_factor
@@ -156,9 +171,13 @@
variable_substitution_dict = {id(k):variable_substitution_map[k]
for k in variable_substitution_map}
+ already_scaled = set()
for component in model.component_objects(ctype=(Constraint, Objective), descend_into=True):
for k in component:
c = component[k]
+ if id(c) in already_scaled:
+ continue
+ already_scaled.add(id(c))
# perform the constraint/objective scaling and variable sub
scaling_factor = component_scaling_factor_map[c]
if isinstance(c, _ConstraintData):
@@ -240,6 +259,7 @@
for scaled_v in scaled_model.component_objects(ctype=Var, descend_into=True):
# get the unscaled_v from the original model
original_v_path = scaled_component_to_original_name_map[scaled_v]
+ # This will not work if decimal indices are present:
original_v = original_model.find_component(original_v_path)
for k in scaled_v:
| {"golden_diff": "diff --git a/pyomo/core/plugins/transform/scaling.py b/pyomo/core/plugins/transform/scaling.py\n--- a/pyomo/core/plugins/transform/scaling.py\n+++ b/pyomo/core/plugins/transform/scaling.py\n@@ -99,7 +99,7 @@\n % (scaling_factor, component_data))\n return scaling_factor\n \n- def _apply_to(self, model, **kwds):\n+ def _apply_to(self, model, rename=True):\n # create a map of component to scaling factor\n component_scaling_factor_map = ComponentMap()\n \n@@ -117,19 +117,34 @@\n raise ValueError(\"ScaleModel transformation: unknown scaling_method found\"\n \"-- supported values: 'user' \")\n \n- # rename all the Vars, Constraints, and Objectives from foo to scaled_foo\n- scaled_component_to_original_name_map = \\\n- rename_components(model=model,\n- component_list=list(model.component_objects(ctype=[Var, Constraint, Objective])),\n- prefix='scaled_')\n+ if rename:\n+ # rename all the Vars, Constraints, and Objectives\n+ # from foo to scaled_foo\n+ component_list = list(model.component_objects(\n+ ctype=[Var, Constraint, Objective]))\n+ scaled_component_to_original_name_map = rename_components(\n+ model=model,\n+ component_list=component_list,\n+ prefix='scaled_',\n+ )\n+ else:\n+ scaled_component_to_original_name_map = ComponentMap(\n+ [(comp, comp.name) for comp in \n+ model.component_objects(\n+ ctype=[Var,Constraint, Objective])]\n+ )\n \n # scale the variable bounds and values and build the variable substitution map\n # for scaling vars in constraints\n variable_substitution_map = ComponentMap()\n+ already_scaled = set()\n for variable in [var for var in model.component_objects(ctype=Var, descend_into=True)]:\n # set the bounds/value for the scaled variable\n for k in variable:\n v = variable[k]\n+ if id(v) in already_scaled:\n+ continue\n+ already_scaled.add(id(v))\n scaling_factor = component_scaling_factor_map[v]\n variable_substitution_map[v] = v / scaling_factor\n \n@@ -156,9 +171,13 @@\n variable_substitution_dict = {id(k):variable_substitution_map[k]\n for k in variable_substitution_map}\n \n+ already_scaled = set()\n for component in model.component_objects(ctype=(Constraint, Objective), descend_into=True):\n for k in component:\n c = component[k]\n+ if id(c) in already_scaled:\n+ continue\n+ already_scaled.add(id(c))\n # perform the constraint/objective scaling and variable sub\n scaling_factor = component_scaling_factor_map[c]\n if isinstance(c, _ConstraintData):\n@@ -240,6 +259,7 @@\n for scaled_v in scaled_model.component_objects(ctype=Var, descend_into=True):\n # get the unscaled_v from the original model\n original_v_path = scaled_component_to_original_name_map[scaled_v]\n+ # This will not work if decimal indices are present:\n original_v = original_model.find_component(original_v_path)\n \n for k in scaled_v:\n", "issue": "Scaling transformation does not work with References\nReferences break the scaling transformation because it renames the underlying components, then tries to scale the now-broken `Reference` components. Some options:\r\n1. Rename all components (breaking references), then don't try to scale references\r\n2. Don't rename components\r\n3. Create new, renamed components but don't delete the old components\r\n\r\nI think I'm in favor of option 1 for now as long as we document that the scaling transformation breaks references. 
Maybe we can have a `rename_scaled_components` option?\n", "before_files": [{"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and \n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain \n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\nfrom pyomo.common.collections import ComponentMap\nfrom pyomo.core.base import Var, Constraint, Objective, _ConstraintData, _ObjectiveData, Suffix, value\nfrom pyomo.core.plugins.transform.hierarchy import Transformation\nfrom pyomo.core.base import TransformationFactory\nfrom pyomo.core.expr.current import replace_expressions\nfrom pyomo.util.components import rename_components\n\n\[email protected]('core.scale_model',\n doc=\"Scale model variables, constraints, and objectives.\")\nclass ScaleModel(Transformation):\n \"\"\"\n Transformation to scale a model.\n\n This plugin performs variable, constraint, and objective scaling on\n a model based on the scaling factors in the suffix 'scaling_parameter'\n set for the variables, constraints, and/or objective. This is typically \n done to scale the problem for improved numerical properties.\n\n Supported transformation methods:\n * :py:meth:`apply_to <pyomo.core.plugins.transform.scaling.ScaleModel.apply_to>`\n * :py:meth:`create_using <pyomo.core.plugins.transform.scaling.ScaleModel.create_using>`\n * :py:meth:`propagate_solution <pyomo.core.plugins.transform.scaling.ScaleModel.propagate_solution>`\n\n\n Examples\n --------\n\n .. 
doctest::\n\n >>> from pyomo.environ import *\n >>> # create the model\n >>> model = ConcreteModel()\n >>> model.x = Var(bounds=(-5, 5), initialize=1.0)\n >>> model.y = Var(bounds=(0, 1), initialize=1.0) \n >>> model.obj = Objective(expr=1e8*model.x + 1e6*model.y)\n >>> model.con = Constraint(expr=model.x + model.y == 1.0)\n >>> # create the scaling factors\n >>> model.scaling_factor = Suffix(direction=Suffix.EXPORT)\n >>> model.scaling_factor[model.obj] = 1e-6 # scale the objective\n >>> model.scaling_factor[model.con] = 2.0 # scale the constraint\n >>> model.scaling_factor[model.x] = 0.2 # scale the x variable\n >>> # transform the model \n >>> scaled_model = TransformationFactory('core.scale_model').create_using(model)\n >>> # print the value of the objective function to show scaling has occurred\n >>> print(value(model.x))\n 1.0\n >>> print(value(scaled_model.scaled_x))\n 0.2\n >>> print(value(scaled_model.scaled_x.lb))\n -1.0\n >>> print(value(model.obj))\n 101000000.0\n >>> print(value(scaled_model.scaled_obj))\n 101.0\n\n ToDo\n ====\n - implement an option to change the variables names or not\n\n \"\"\"\n\n def __init__(self, **kwds):\n kwds['name'] = \"scale_model\"\n self._scaling_method = kwds.pop('scaling_method', 'user')\n super(ScaleModel, self).__init__(**kwds)\n\n def _create_using(self, original_model, **kwds):\n scaled_model = original_model.clone()\n self._apply_to(scaled_model, **kwds)\n return scaled_model\n\n def _get_float_scaling_factor(self, instance, component_data):\n scaling_factor = None\n if component_data in instance.scaling_factor:\n scaling_factor = instance.scaling_factor[component_data]\n elif component_data.parent_component() in instance.scaling_factor:\n scaling_factor = instance.scaling_factor[component_data.parent_component()]\n\n if scaling_factor is None:\n return 1.0\n\n try:\n scaling_factor = float(scaling_factor)\n except ValueError:\n raise ValueError(\n \"Suffix 'scaling_factor' has a value %s for component %s that cannot be converted to a float. 
\"\n \"Floating point values are required for this suffix in the ScaleModel transformation.\"\n % (scaling_factor, component_data))\n return scaling_factor\n\n def _apply_to(self, model, **kwds):\n # create a map of component to scaling factor\n component_scaling_factor_map = ComponentMap()\n\n # if the scaling_method is 'user', get the scaling parameters from the suffixes\n if self._scaling_method == 'user':\n # perform some checks to make sure we have the necessary suffixes\n if type(model.component('scaling_factor')) is not Suffix:\n raise ValueError(\"ScaleModel transformation called with scaling_method='user'\"\n \", but cannot find the suffix 'scaling_factor' on the model\")\n\n # get the scaling factors\n for c in model.component_data_objects(ctype=(Var, Constraint, Objective), descend_into=True):\n component_scaling_factor_map[c] = self._get_float_scaling_factor(model, c)\n else:\n raise ValueError(\"ScaleModel transformation: unknown scaling_method found\"\n \"-- supported values: 'user' \")\n\n # rename all the Vars, Constraints, and Objectives from foo to scaled_foo\n scaled_component_to_original_name_map = \\\n rename_components(model=model,\n component_list=list(model.component_objects(ctype=[Var, Constraint, Objective])),\n prefix='scaled_')\n\n # scale the variable bounds and values and build the variable substitution map\n # for scaling vars in constraints\n variable_substitution_map = ComponentMap()\n for variable in [var for var in model.component_objects(ctype=Var, descend_into=True)]:\n # set the bounds/value for the scaled variable\n for k in variable:\n v = variable[k]\n scaling_factor = component_scaling_factor_map[v]\n variable_substitution_map[v] = v / scaling_factor\n\n if v.lb is not None:\n v.setlb(v.lb * scaling_factor)\n if v.ub is not None:\n v.setub(v.ub * scaling_factor)\n if scaling_factor < 0:\n temp = v.lb\n v.setlb(v.ub)\n v.setub(temp)\n\n if v.value is not None:\n v.value = value(v) * scaling_factor\n\n # scale the objectives/constraints and perform the scaled variable substitution\n scale_constraint_dual = False\n if type(model.component('dual')) is Suffix:\n scale_constraint_dual = True\n\n # translate the variable_substitution_map (ComponentMap)\n # to variable_substition_dict (key: id() of component)\n # ToDo: We should change replace_expressions to accept a ComponentMap as well\n variable_substitution_dict = {id(k):variable_substitution_map[k]\n for k in variable_substitution_map}\n\n for component in model.component_objects(ctype=(Constraint, Objective), descend_into=True):\n for k in component:\n c = component[k]\n # perform the constraint/objective scaling and variable sub\n scaling_factor = component_scaling_factor_map[c]\n if isinstance(c, _ConstraintData):\n body = scaling_factor * \\\n replace_expressions(expr=c.body,\n substitution_map=variable_substitution_dict,\n descend_into_named_expressions=True,\n remove_named_expressions=True)\n\n # scale the rhs\n if c._lower is not None:\n c._lower = c._lower * scaling_factor\n if c._upper is not None:\n c._upper = c._upper * scaling_factor\n\n if scaling_factor < 0:\n c._lower, c._upper = c._upper, c._lower\n\n if scale_constraint_dual and c in model.dual:\n dual_value = model.dual[c]\n if dual_value is not None:\n model.dual[c] = dual_value / scaling_factor\n\n c.set_value((c._lower, body, c._upper))\n\n elif isinstance(c, _ObjectiveData):\n c.expr = scaling_factor * \\\n replace_expressions(expr=c.expr,\n substitution_map=variable_substitution_dict,\n descend_into_named_expressions=True,\n 
remove_named_expressions=True)\n else:\n raise NotImplementedError(\n 'Unknown object type found when applying scaling factors in ScaleModel transformation - Internal Error')\n\n model.component_scaling_factor_map = component_scaling_factor_map\n model.scaled_component_to_original_name_map = scaled_component_to_original_name_map\n\n return model\n\n def propagate_solution(self, scaled_model, original_model):\n \"\"\"\n This method takes the solution in scaled_model and maps it back to the original model.\n\n It will also transform duals and reduced costs if the suffixes 'dual' and/or 'rc' are present.\n The :code:`scaled_model` argument must be a model that was already scaled using this transformation\n as it expects data from the transformation to perform the back mapping.\n\n Parameters\n ----------\n scaled_model : Pyomo Model\n The model that was previously scaled with this transformation\n original_model : Pyomo Model\n The original unscaled source model\n\n \"\"\"\n if not hasattr(scaled_model, 'component_scaling_factor_map'):\n raise AttributeError('ScaleModel:propagate_solution called with scaled_model that does not '\n 'have a component_scaling_factor_map. It is possible this method was called '\n 'using a model that was not scaled with the ScaleModel transformation')\n if not hasattr(scaled_model, 'scaled_component_to_original_name_map'):\n raise AttributeError('ScaleModel:propagate_solution called with scaled_model that does not '\n 'have a scaled_component_to_original_name_map. It is possible this method was called '\n 'using a model that was not scaled with the ScaleModel transformation')\n\n component_scaling_factor_map = scaled_model.component_scaling_factor_map\n scaled_component_to_original_name_map = scaled_model.scaled_component_to_original_name_map\n\n # get the objective scaling factor\n scaled_objectives = list(scaled_model.component_data_objects(ctype=Objective, active=True, descend_into=True))\n if len(scaled_objectives) != 1:\n raise NotImplementedError(\n 'ScaleModel.propagate_solution requires a single active objective function, but %d objectives found.' % (\n len(objectives)))\n objective_scaling_factor = component_scaling_factor_map[scaled_objectives[0]]\n\n # transfer the variable values and reduced costs\n check_reduced_costs = type(scaled_model.component('rc')) is Suffix\n for scaled_v in scaled_model.component_objects(ctype=Var, descend_into=True):\n # get the unscaled_v from the original model\n original_v_path = scaled_component_to_original_name_map[scaled_v]\n original_v = original_model.find_component(original_v_path)\n\n for k in scaled_v:\n original_v[k].value = value(scaled_v[k]) / component_scaling_factor_map[scaled_v[k]]\n if check_reduced_costs and scaled_v[k] in scaled_model.rc:\n original_model.rc[original_v[k]] = scaled_model.rc[scaled_v[k]] * component_scaling_factor_map[\n scaled_v[k]] / objective_scaling_factor\n\n # transfer the duals\n if type(scaled_model.component('dual')) is Suffix and type(original_model.component('dual')) is Suffix:\n for scaled_c in scaled_model.component_objects(ctype=Constraint, descend_into=True):\n original_c = original_model.find_component(scaled_component_to_original_name_map[scaled_c])\n\n for k in scaled_c:\n original_model.dual[original_c[k]] = scaled_model.dual[scaled_c[k]] * component_scaling_factor_map[\n scaled_c[k]] / objective_scaling_factor\n", "path": "pyomo/core/plugins/transform/scaling.py"}]} | 3,853 | 708 |
gh_patches_debug_2401 | rasdani/github-patches | git_diff | numpy__numpy-15672 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
FIXME in `numpy/__init__.py` related to `numpy.lib` imports
There is a FIXME comment in `numpy/__init__.py` that doesn't seem to have a corresponding issue on GitHub, at least not one that I noticed with a cursory search of the issues.
https://github.com/numpy/numpy/blob/eb167a3fe540780f397a14817f54a95333fbcc6c/numpy/__init__.py#L140-L145
There is additional code in `numpy/__init__.py` related to this FIXME:
https://github.com/numpy/numpy/blob/eb167a3fe540780f397a14817f54a95333fbcc6c/numpy/__init__.py#L178-L184
My intent is getting this into the issue tracker so that it can be discussed/documented and synced up with the code comments. If there is an existing issue that I missed, I'd recommend updating the comment in `numpy/__init__.py` to point there.
</issue>
<code>
[start of numpy/__init__.py]
1 """
2 NumPy
3 =====
4
5 Provides
6 1. An array object of arbitrary homogeneous items
7 2. Fast mathematical operations over arrays
8 3. Linear Algebra, Fourier Transforms, Random Number Generation
9
10 How to use the documentation
11 ----------------------------
12 Documentation is available in two forms: docstrings provided
13 with the code, and a loose standing reference guide, available from
14 `the NumPy homepage <https://www.scipy.org>`_.
15
16 We recommend exploring the docstrings using
17 `IPython <https://ipython.org>`_, an advanced Python shell with
18 TAB-completion and introspection capabilities. See below for further
19 instructions.
20
21 The docstring examples assume that `numpy` has been imported as `np`::
22
23 >>> import numpy as np
24
25 Code snippets are indicated by three greater-than signs::
26
27 >>> x = 42
28 >>> x = x + 1
29
30 Use the built-in ``help`` function to view a function's docstring::
31
32 >>> help(np.sort)
33 ... # doctest: +SKIP
34
35 For some objects, ``np.info(obj)`` may provide additional help. This is
36 particularly true if you see the line "Help on ufunc object:" at the top
37 of the help() page. Ufuncs are implemented in C, not Python, for speed.
38 The native Python help() does not know how to view their help, but our
39 np.info() function does.
40
41 To search for documents containing a keyword, do::
42
43 >>> np.lookfor('keyword')
44 ... # doctest: +SKIP
45
46 General-purpose documents like a glossary and help on the basic concepts
47 of numpy are available under the ``doc`` sub-module::
48
49 >>> from numpy import doc
50 >>> help(doc)
51 ... # doctest: +SKIP
52
53 Available subpackages
54 ---------------------
55 doc
56 Topical documentation on broadcasting, indexing, etc.
57 lib
58 Basic functions used by several sub-packages.
59 random
60 Core Random Tools
61 linalg
62 Core Linear Algebra Tools
63 fft
64 Core FFT routines
65 polynomial
66 Polynomial tools
67 testing
68 NumPy testing tools
69 f2py
70 Fortran to Python Interface Generator.
71 distutils
72 Enhancements to distutils with support for
73 Fortran compilers support and more.
74
75 Utilities
76 ---------
77 test
78 Run numpy unittests
79 show_config
80 Show numpy build configuration
81 dual
82 Overwrite certain functions with high-performance Scipy tools
83 matlib
84 Make everything matrices.
85 __version__
86 NumPy version string
87
88 Viewing documentation using IPython
89 -----------------------------------
90 Start IPython with the NumPy profile (``ipython -p numpy``), which will
91 import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
92 paste examples into the shell. To see which functions are available in
93 `numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
94 ``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
95 down the list. To view the docstring for a function, use
96 ``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
97 the source code).
98
99 Copies vs. in-place operation
100 -----------------------------
101 Most of the functions in `numpy` return a copy of the array argument
102 (e.g., `np.sort`). In-place versions of these functions are often
103 available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
104 Exceptions to this rule are documented.
105
106 """
107 import sys
108 import warnings
109
110 from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning
111 from ._globals import _NoValue
112
113 # We first need to detect if we're being called as part of the numpy setup
114 # procedure itself in a reliable manner.
115 try:
116 __NUMPY_SETUP__
117 except NameError:
118 __NUMPY_SETUP__ = False
119
120 if __NUMPY_SETUP__:
121 sys.stderr.write('Running from numpy source directory.\n')
122 else:
123 try:
124 from numpy.__config__ import show as show_config
125 except ImportError:
126 msg = """Error importing numpy: you should not try to import numpy from
127 its source directory; please exit the numpy source tree, and relaunch
128 your python interpreter from there."""
129 raise ImportError(msg)
130
131 from .version import git_revision as __git_revision__
132 from .version import version as __version__
133
134 __all__ = ['ModuleDeprecationWarning',
135 'VisibleDeprecationWarning']
136
137 # Allow distributors to run custom init code
138 from . import _distributor_init
139
140 from . import core
141 from .core import *
142 from . import compat
143 from . import lib
144 # FIXME: why have numpy.lib if everything is imported here??
145 from .lib import *
146
147 from . import linalg
148 from . import fft
149 from . import polynomial
150 from . import random
151 from . import ctypeslib
152 from . import ma
153 from . import matrixlib as _mat
154 from .matrixlib import *
155 from .compat import long
156
157 # Make these accessible from numpy name-space
158 # but not imported in from numpy import *
159 # TODO[gh-6103]: Deprecate these
160 from builtins import bool, int, float, complex, object, str
161 unicode = str
162
163 from .core import round, abs, max, min
164 # now that numpy modules are imported, can initialize limits
165 core.getlimits._register_known_types()
166
167 __all__.extend(['__version__', 'show_config'])
168 __all__.extend(core.__all__)
169 __all__.extend(_mat.__all__)
170 __all__.extend(lib.__all__)
171 __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
172
173 # These are added by `from .core import *` and `core.__all__`, but we
174 # overwrite them above with builtins we do _not_ want to export.
175 __all__.remove('long')
176 __all__.remove('unicode')
177
178 # Remove things that are in the numpy.lib but not in the numpy namespace
179 # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
180 # that prevents adding more things to the main namespace by accident.
181 # The list below will grow until the `from .lib import *` fixme above is
182 # taken care of
183 __all__.remove('Arrayterator')
184 del Arrayterator
185
186 # Filter out Cython harmless warnings
187 warnings.filterwarnings("ignore", message="numpy.dtype size changed")
188 warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
189 warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
190
191 # oldnumeric and numarray were removed in 1.9. In case some packages import
192 # but do not use them, we define them here for backward compatibility.
193 oldnumeric = 'removed'
194 numarray = 'removed'
195
196 if sys.version_info[:2] >= (3, 7):
197 # Importing Tester requires importing all of UnitTest which is not a
198 # cheap import Since it is mainly used in test suits, we lazy import it
199 # here to save on the order of 10 ms of import time for most users
200 #
201 # The previous way Tester was imported also had a side effect of adding
202 # the full `numpy.testing` namespace
203 #
204 # module level getattr is only supported in 3.7 onwards
205 # https://www.python.org/dev/peps/pep-0562/
206 def __getattr__(attr):
207 if attr == 'testing':
208 import numpy.testing as testing
209 return testing
210 elif attr == 'Tester':
211 from .testing import Tester
212 return Tester
213 else:
214 raise AttributeError("module {!r} has no attribute "
215 "{!r}".format(__name__, attr))
216
217 def __dir__():
218 return list(globals().keys() | {'Tester', 'testing'})
219
220 else:
221 # We don't actually use this ourselves anymore, but I'm not 100% sure that
222 # no-one else in the world is using it (though I hope not)
223 from .testing import Tester
224
225 # Pytest testing
226 from numpy._pytesttester import PytestTester
227 test = PytestTester(__name__)
228 del PytestTester
229
230
231 def _sanity_check():
232 """
233 Quick sanity checks for common bugs caused by environment.
234 There are some cases e.g. with wrong BLAS ABI that cause wrong
235 results under specific runtime conditions that are not necessarily
236 achieved during test suite runs, and it is useful to catch those early.
237
238 See https://github.com/numpy/numpy/issues/8577 and other
239 similar bug reports.
240
241 """
242 try:
243 x = ones(2, dtype=float32)
244 if not abs(x.dot(x) - 2.0) < 1e-5:
245 raise AssertionError()
246 except AssertionError:
247 msg = ("The current Numpy installation ({!r}) fails to "
248 "pass simple sanity checks. This can be caused for example "
249 "by incorrect BLAS library being linked in, or by mixing "
250 "package managers (pip, conda, apt, ...). Search closed "
251 "numpy issues for similar problems.")
252 raise RuntimeError(msg.format(__file__))
253
254 _sanity_check()
255 del _sanity_check
256
[end of numpy/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpy/__init__.py b/numpy/__init__.py
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -141,7 +141,8 @@
from .core import *
from . import compat
from . import lib
- # FIXME: why have numpy.lib if everything is imported here??
+ # NOTE: to be revisited following future namespace cleanup.
+ # See gh-14454 and gh-15672 for discussion.
from .lib import *
from . import linalg
| {"golden_diff": "diff --git a/numpy/__init__.py b/numpy/__init__.py\n--- a/numpy/__init__.py\n+++ b/numpy/__init__.py\n@@ -141,7 +141,8 @@\n from .core import *\n from . import compat\n from . import lib\n- # FIXME: why have numpy.lib if everything is imported here??\n+ # NOTE: to be revisited following future namespace cleanup.\n+ # See gh-14454 and gh-15672 for discussion.\n from .lib import *\n \n from . import linalg\n", "issue": "FIXME in `numpy/__init__.py` related to `numpy.lib` imports\nThere is a FIXME comment in `numpy/__init__.py` that doesn't seem to have a corresponding issue on GitHub, at least not one that I noticed with a cursory search of the issues.\r\n\r\nhttps://github.com/numpy/numpy/blob/eb167a3fe540780f397a14817f54a95333fbcc6c/numpy/__init__.py#L140-L145\r\n\r\nThere is additional code in `numpy/__init__.py` related to this FIXME:\r\n\r\nhttps://github.com/numpy/numpy/blob/eb167a3fe540780f397a14817f54a95333fbcc6c/numpy/__init__.py#L178-L184\r\n\r\nMy intent is getting this into the issue tracker so that it can be discussed/documented and synced up with the code comments. If there is an existing issue that I missed, I'd recommend updating the comment in `numpy/__init__.py` to point there.\n", "before_files": [{"content": "\"\"\"\nNumPy\n=====\n\nProvides\n 1. An array object of arbitrary homogeneous items\n 2. Fast mathematical operations over arrays\n 3. Linear Algebra, Fourier Transforms, Random Number Generation\n\nHow to use the documentation\n----------------------------\nDocumentation is available in two forms: docstrings provided\nwith the code, and a loose standing reference guide, available from\n`the NumPy homepage <https://www.scipy.org>`_.\n\nWe recommend exploring the docstrings using\n`IPython <https://ipython.org>`_, an advanced Python shell with\nTAB-completion and introspection capabilities. See below for further\ninstructions.\n\nThe docstring examples assume that `numpy` has been imported as `np`::\n\n >>> import numpy as np\n\nCode snippets are indicated by three greater-than signs::\n\n >>> x = 42\n >>> x = x + 1\n\nUse the built-in ``help`` function to view a function's docstring::\n\n >>> help(np.sort)\n ... # doctest: +SKIP\n\nFor some objects, ``np.info(obj)`` may provide additional help. This is\nparticularly true if you see the line \"Help on ufunc object:\" at the top\nof the help() page. Ufuncs are implemented in C, not Python, for speed.\nThe native Python help() does not know how to view their help, but our\nnp.info() function does.\n\nTo search for documents containing a keyword, do::\n\n >>> np.lookfor('keyword')\n ... # doctest: +SKIP\n\nGeneral-purpose documents like a glossary and help on the basic concepts\nof numpy are available under the ``doc`` sub-module::\n\n >>> from numpy import doc\n >>> help(doc)\n ... 
# doctest: +SKIP\n\nAvailable subpackages\n---------------------\ndoc\n Topical documentation on broadcasting, indexing, etc.\nlib\n Basic functions used by several sub-packages.\nrandom\n Core Random Tools\nlinalg\n Core Linear Algebra Tools\nfft\n Core FFT routines\npolynomial\n Polynomial tools\ntesting\n NumPy testing tools\nf2py\n Fortran to Python Interface Generator.\ndistutils\n Enhancements to distutils with support for\n Fortran compilers support and more.\n\nUtilities\n---------\ntest\n Run numpy unittests\nshow_config\n Show numpy build configuration\ndual\n Overwrite certain functions with high-performance Scipy tools\nmatlib\n Make everything matrices.\n__version__\n NumPy version string\n\nViewing documentation using IPython\n-----------------------------------\nStart IPython with the NumPy profile (``ipython -p numpy``), which will\nimport `numpy` under the alias `np`. Then, use the ``cpaste`` command to\npaste examples into the shell. To see which functions are available in\n`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use\n``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow\ndown the list. To view the docstring for a function, use\n``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view\nthe source code).\n\nCopies vs. in-place operation\n-----------------------------\nMost of the functions in `numpy` return a copy of the array argument\n(e.g., `np.sort`). In-place versions of these functions are often\navailable as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.\nExceptions to this rule are documented.\n\n\"\"\"\nimport sys\nimport warnings\n\nfrom ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning\nfrom ._globals import _NoValue\n\n# We first need to detect if we're being called as part of the numpy setup\n# procedure itself in a reliable manner.\ntry:\n __NUMPY_SETUP__\nexcept NameError:\n __NUMPY_SETUP__ = False\n\nif __NUMPY_SETUP__:\n sys.stderr.write('Running from numpy source directory.\\n')\nelse:\n try:\n from numpy.__config__ import show as show_config\n except ImportError:\n msg = \"\"\"Error importing numpy: you should not try to import numpy from\n its source directory; please exit the numpy source tree, and relaunch\n your python interpreter from there.\"\"\"\n raise ImportError(msg)\n\n from .version import git_revision as __git_revision__\n from .version import version as __version__\n\n __all__ = ['ModuleDeprecationWarning',\n 'VisibleDeprecationWarning']\n\n # Allow distributors to run custom init code\n from . import _distributor_init\n\n from . import core\n from .core import *\n from . import compat\n from . import lib\n # FIXME: why have numpy.lib if everything is imported here??\n from .lib import *\n\n from . import linalg\n from . import fft\n from . import polynomial\n from . import random\n from . import ctypeslib\n from . import ma\n from . 
import matrixlib as _mat\n from .matrixlib import *\n from .compat import long\n\n # Make these accessible from numpy name-space\n # but not imported in from numpy import *\n # TODO[gh-6103]: Deprecate these\n from builtins import bool, int, float, complex, object, str\n unicode = str\n\n from .core import round, abs, max, min\n # now that numpy modules are imported, can initialize limits\n core.getlimits._register_known_types()\n\n __all__.extend(['__version__', 'show_config'])\n __all__.extend(core.__all__)\n __all__.extend(_mat.__all__)\n __all__.extend(lib.__all__)\n __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])\n\n # These are added by `from .core import *` and `core.__all__`, but we\n # overwrite them above with builtins we do _not_ want to export.\n __all__.remove('long')\n __all__.remove('unicode')\n\n # Remove things that are in the numpy.lib but not in the numpy namespace\n # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)\n # that prevents adding more things to the main namespace by accident.\n # The list below will grow until the `from .lib import *` fixme above is\n # taken care of\n __all__.remove('Arrayterator')\n del Arrayterator\n\n # Filter out Cython harmless warnings\n warnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ufunc size changed\")\n warnings.filterwarnings(\"ignore\", message=\"numpy.ndarray size changed\")\n\n # oldnumeric and numarray were removed in 1.9. In case some packages import\n # but do not use them, we define them here for backward compatibility.\n oldnumeric = 'removed'\n numarray = 'removed'\n\n if sys.version_info[:2] >= (3, 7):\n # Importing Tester requires importing all of UnitTest which is not a\n # cheap import Since it is mainly used in test suits, we lazy import it\n # here to save on the order of 10 ms of import time for most users\n #\n # The previous way Tester was imported also had a side effect of adding\n # the full `numpy.testing` namespace\n #\n # module level getattr is only supported in 3.7 onwards\n # https://www.python.org/dev/peps/pep-0562/\n def __getattr__(attr):\n if attr == 'testing':\n import numpy.testing as testing\n return testing\n elif attr == 'Tester':\n from .testing import Tester\n return Tester\n else:\n raise AttributeError(\"module {!r} has no attribute \"\n \"{!r}\".format(__name__, attr))\n\n def __dir__():\n return list(globals().keys() | {'Tester', 'testing'})\n\n else:\n # We don't actually use this ourselves anymore, but I'm not 100% sure that\n # no-one else in the world is using it (though I hope not)\n from .testing import Tester\n\n # Pytest testing\n from numpy._pytesttester import PytestTester\n test = PytestTester(__name__)\n del PytestTester\n\n\n def _sanity_check():\n \"\"\"\n Quick sanity checks for common bugs caused by environment.\n There are some cases e.g. with wrong BLAS ABI that cause wrong\n results under specific runtime conditions that are not necessarily\n achieved during test suite runs, and it is useful to catch those early.\n\n See https://github.com/numpy/numpy/issues/8577 and other\n similar bug reports.\n\n \"\"\"\n try:\n x = ones(2, dtype=float32)\n if not abs(x.dot(x) - 2.0) < 1e-5:\n raise AssertionError()\n except AssertionError:\n msg = (\"The current Numpy installation ({!r}) fails to \"\n \"pass simple sanity checks. 
This can be caused for example \"\n \"by incorrect BLAS library being linked in, or by mixing \"\n \"package managers (pip, conda, apt, ...). Search closed \"\n \"numpy issues for similar problems.\")\n raise RuntimeError(msg.format(__file__))\n\n _sanity_check()\n del _sanity_check\n", "path": "numpy/__init__.py"}]} | 3,522 | 134 |
gh_patches_debug_3236 | rasdani/github-patches | git_diff | pre-commit__pre-commit-1092 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Globally disable color?
I cannot find the way to globally disable color in the pre-commit output. Setting only the background color to green and not changing the foreground color does not work for my terminal with the following settings in the Xt resources (as set in the `${HOME}/.Xresources` file):
````properties
Rxvt.background: black
Rxvt.foreground: deepSkyBlue
````
Is there a way? It would be great to respect https://no-color.org/ environment variable. And, while we are here, maybe understand the following git config setting:
````ini
[color]
ui = never
````
</issue>
<code>
[start of pre_commit/color.py]
1 from __future__ import unicode_literals
2
3 import os
4 import sys
5
6 terminal_supports_color = True
7 if os.name == 'nt': # pragma: no cover (windows)
8 from pre_commit.color_windows import enable_virtual_terminal_processing
9 try:
10 enable_virtual_terminal_processing()
11 except WindowsError:
12 terminal_supports_color = False
13
14 RED = '\033[41m'
15 GREEN = '\033[42m'
16 YELLOW = '\033[43;30m'
17 TURQUOISE = '\033[46;30m'
18 NORMAL = '\033[0m'
19
20
21 class InvalidColorSetting(ValueError):
22 pass
23
24
25 def format_color(text, color, use_color_setting):
26 """Format text with color.
27
28 Args:
29 text - Text to be formatted with color if `use_color`
30 color - The color start string
31 use_color_setting - Whether or not to color
32 """
33 if not use_color_setting:
34 return text
35 else:
36 return '{}{}{}'.format(color, text, NORMAL)
37
38
39 COLOR_CHOICES = ('auto', 'always', 'never')
40
41
42 def use_color(setting):
43 """Choose whether to use color based on the command argument.
44
45 Args:
46 setting - Either `auto`, `always`, or `never`
47 """
48 if setting not in COLOR_CHOICES:
49 raise InvalidColorSetting(setting)
50
51 return (
52 setting == 'always' or
53 (setting == 'auto' and sys.stdout.isatty() and terminal_supports_color)
54 )
55
[end of pre_commit/color.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pre_commit/color.py b/pre_commit/color.py
--- a/pre_commit/color.py
+++ b/pre_commit/color.py
@@ -48,6 +48,9 @@
if setting not in COLOR_CHOICES:
raise InvalidColorSetting(setting)
+ if os.environ.get('NO_COLOR'):
+ return False
+
return (
setting == 'always' or
(setting == 'auto' and sys.stdout.isatty() and terminal_supports_color)
| {"golden_diff": "diff --git a/pre_commit/color.py b/pre_commit/color.py\n--- a/pre_commit/color.py\n+++ b/pre_commit/color.py\n@@ -48,6 +48,9 @@\n if setting not in COLOR_CHOICES:\n raise InvalidColorSetting(setting)\n \n+ if os.environ.get('NO_COLOR'):\n+ return False\n+\n return (\n setting == 'always' or\n (setting == 'auto' and sys.stdout.isatty() and terminal_supports_color)\n", "issue": "Globally disable color?\nI cannot find the way to globally disable color in the pre-commit output. Setting only the background color to green and not changing the foreground color does not work for my terminal with the following settings in the Xt resources (as set in the `${HOME}/.Xresources` file):\r\n\r\n````properties\r\nRxvt.background: black\r\nRxvt.foreground: deepSkyBlue\r\n````\r\n\r\nIs there a way? It would be great to respect https://no-color.org/ environment variable. And, while we are here, maybe understand the following git config setting:\r\n\r\n````ini\r\n[color]\r\n ui = never\r\n````\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport sys\n\nterminal_supports_color = True\nif os.name == 'nt': # pragma: no cover (windows)\n from pre_commit.color_windows import enable_virtual_terminal_processing\n try:\n enable_virtual_terminal_processing()\n except WindowsError:\n terminal_supports_color = False\n\nRED = '\\033[41m'\nGREEN = '\\033[42m'\nYELLOW = '\\033[43;30m'\nTURQUOISE = '\\033[46;30m'\nNORMAL = '\\033[0m'\n\n\nclass InvalidColorSetting(ValueError):\n pass\n\n\ndef format_color(text, color, use_color_setting):\n \"\"\"Format text with color.\n\n Args:\n text - Text to be formatted with color if `use_color`\n color - The color start string\n use_color_setting - Whether or not to color\n \"\"\"\n if not use_color_setting:\n return text\n else:\n return '{}{}{}'.format(color, text, NORMAL)\n\n\nCOLOR_CHOICES = ('auto', 'always', 'never')\n\n\ndef use_color(setting):\n \"\"\"Choose whether to use color based on the command argument.\n\n Args:\n setting - Either `auto`, `always`, or `never`\n \"\"\"\n if setting not in COLOR_CHOICES:\n raise InvalidColorSetting(setting)\n\n return (\n setting == 'always' or\n (setting == 'auto' and sys.stdout.isatty() and terminal_supports_color)\n )\n", "path": "pre_commit/color.py"}]} | 1,104 | 102 |
gh_patches_debug_9632 | rasdani/github-patches | git_diff | pyqtgraph__pyqtgraph-1660 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
pip installation of pyqtgraph 0.12.0 does not bundle colors/maps directory
<!-- In the following, please describe your issue in detail! -->
<!-- If some of the sections do not apply, just remove them. -->
### Short description
<!-- This should summarize the issue. -->
In a pip install of pyqtgraph 0.12.0, the colors/maps subdirectory is not included in the install.
### Code to reproduce
<!-- Please provide a minimal working example that reproduces the issue in the code block below.
Ideally, this should be a full example someone else could run without additional setup. -->
```
py -m pyqtgraph.examples.colorMaps
```
### Expected behavior
<!-- What should happen? -->
Color Maps example should execute.
### Real behavior
<!-- What happens? -->
```
FileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\Users\\XYZ\\AppData\\Local\\Programs\Python\\Python38\\lib\\site-packages\\pyqtgraph\\colors\\maps'
```
### Tested environment(s)
* PyQtGraph version: 0.12.0
* Qt Python binding: PySide6 6.0.1 Qt 6.0.1
* Python version: 3.8.8
* NumPy version: 1.20.1
* Operating system: Windows
* Installation method: pip
### Additional context
The user can more easily trigger this by trying to run the Color Maps example from the ExampleApp GUI.
</issue>
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 DESCRIPTION = """\
3 PyQtGraph is a pure-python graphics and GUI library built on PyQt5/PySide2 and
4 numpy.
5
6 It is intended for use in mathematics / scientific / engineering applications.
7 Despite being written entirely in python, the library is very fast due to its
8 heavy leverage of numpy for number crunching, Qt's GraphicsView framework for
9 2D display, and OpenGL for 3D display.
10 """
11
12 setupOpts = dict(
13 name='pyqtgraph',
14 description='Scientific Graphics and GUI Library for Python',
15 long_description=DESCRIPTION,
16 license = 'MIT',
17 url='http://www.pyqtgraph.org',
18 author='Luke Campagnola',
19 author_email='[email protected]',
20 classifiers = [
21 "Programming Language :: Python",
22 "Programming Language :: Python :: 3",
23 "Development Status :: 4 - Beta",
24 "Environment :: Other Environment",
25 "Intended Audience :: Science/Research",
26 "License :: OSI Approved :: MIT License",
27 "Operating System :: OS Independent",
28 "Topic :: Software Development :: Libraries :: Python Modules",
29 "Topic :: Scientific/Engineering :: Visualization",
30 "Topic :: Software Development :: User Interfaces",
31 ],
32 )
33
34
35 import distutils.dir_util
36 from distutils.command import build
37 import os, sys, re
38 try:
39 import setuptools
40 from setuptools import setup
41 from setuptools.command import install
42 except ImportError:
43 sys.stderr.write("Warning: could not import setuptools; falling back to distutils.\n")
44 from distutils.core import setup
45 from distutils.command import install
46
47
48 # Work around mbcs bug in distutils.
49 # http://bugs.python.org/issue10945
50 import codecs
51 try:
52 codecs.lookup('mbcs')
53 except LookupError:
54 ascii = codecs.lookup('ascii')
55 func = lambda name, enc=ascii: {True: enc}.get(name=='mbcs')
56 codecs.register(func)
57
58
59 path = os.path.split(__file__)[0]
60 sys.path.insert(0, os.path.join(path, 'tools'))
61 import setupHelpers as helpers
62
63 ## generate list of all sub-packages
64 allPackages = (helpers.listAllPackages(pkgroot='pyqtgraph') +
65 ['pyqtgraph.'+x for x in helpers.listAllPackages(pkgroot='examples')])
66
67 ## Decide what version string to use in the build
68 version, forcedVersion, gitVersion, initVersion = helpers.getVersionStrings(pkg='pyqtgraph')
69
70
71
72 class Build(build.build):
73 """
74 * Clear build path before building
75 """
76 def run(self):
77 global path
78
79 ## Make sure build directory is clean
80 buildPath = os.path.join(path, self.build_lib)
81 if os.path.isdir(buildPath):
82 distutils.dir_util.remove_tree(buildPath)
83
84 ret = build.build.run(self)
85
86
87 class Install(install.install):
88 """
89 * Check for previously-installed version before installing
90 * Set version string in __init__ after building. This helps to ensure that we
91 know when an installation came from a non-release code base.
92 """
93 def run(self):
94 global path, version, initVersion, forcedVersion, installVersion
95
96 name = self.config_vars['dist_name']
97 path = os.path.join(self.install_libbase, 'pyqtgraph')
98 if os.path.exists(path):
99 raise Exception("It appears another version of %s is already "
100 "installed at %s; remove this before installing."
101 % (name, path))
102 print("Installing to %s" % path)
103 rval = install.install.run(self)
104
105
106 # If the version in __init__ is different from the automatically-generated
107 # version string, then we will update __init__ in the install directory
108 if initVersion == version:
109 return rval
110
111 try:
112 initfile = os.path.join(path, '__init__.py')
113 data = open(initfile, 'r').read()
114 open(initfile, 'w').write(re.sub(r"__version__ = .*", "__version__ = '%s'" % version, data))
115 installVersion = version
116 except:
117 sys.stderr.write("Warning: Error occurred while setting version string in build path. "
118 "Installation will use the original version string "
119 "%s instead.\n" % (initVersion)
120 )
121 if forcedVersion:
122 raise
123 installVersion = initVersion
124 sys.excepthook(*sys.exc_info())
125
126 return rval
127
128
129 setup(
130 version=version,
131 cmdclass={'build': Build,
132 'install': Install,
133 'deb': helpers.DebCommand,
134 'test': helpers.TestCommand,
135 'debug': helpers.DebugCommand,
136 'mergetest': helpers.MergeTestCommand,
137 'style': helpers.StyleCommand},
138 packages=allPackages,
139 python_requires=">=3.7",
140 package_dir={'pyqtgraph.examples': 'examples'}, ## install examples along with the rest of the source
141 package_data={'pyqtgraph.examples': ['optics/*.gz', 'relativity/presets/*.cfg'],
142 "pyqtgraph.icons": ["*.svg", "*.png"],
143 },
144 install_requires = [
145 'numpy>=1.17.0',
146 ],
147 **setupOpts
148 )
149
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -140,6 +140,7 @@
package_dir={'pyqtgraph.examples': 'examples'}, ## install examples along with the rest of the source
package_data={'pyqtgraph.examples': ['optics/*.gz', 'relativity/presets/*.cfg'],
"pyqtgraph.icons": ["*.svg", "*.png"],
+ "pyqtgraph": ["colors/maps/*.csv", "colors/maps/*.txt"],
},
install_requires = [
'numpy>=1.17.0',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -140,6 +140,7 @@\n package_dir={'pyqtgraph.examples': 'examples'}, ## install examples along with the rest of the source\n package_data={'pyqtgraph.examples': ['optics/*.gz', 'relativity/presets/*.cfg'],\n \"pyqtgraph.icons\": [\"*.svg\", \"*.png\"],\n+ \"pyqtgraph\": [\"colors/maps/*.csv\", \"colors/maps/*.txt\"],\n },\n install_requires = [\n 'numpy>=1.17.0',\n", "issue": "pip installation of pyqtgraph 0.12.0 does not bundle colors/maps directory\n<!-- In the following, please describe your issue in detail! -->\r\n<!-- If some of the sections do not apply, just remove them. -->\r\n\r\n### Short description\r\n<!-- This should summarize the issue. -->\r\n\r\nIn a pip install of pyqtgraph 0.12.0, the colors/maps subdirectory is not included in the install.\r\n\r\n### Code to reproduce\r\n<!-- Please provide a minimal working example that reproduces the issue in the code block below.\r\n Ideally, this should be a full example someone else could run without additional setup. -->\r\n```\r\npy -m pyqtgraph.examples.colorMaps\r\n```\r\n\r\n### Expected behavior\r\n<!-- What should happen? -->\r\n\r\nColor Maps example should execute.\r\n\r\n### Real behavior\r\n<!-- What happens? -->\r\n\r\n```\r\nFileNotFoundError: [WinError 3] The system cannot find the path specified: 'C:\\\\Users\\\\XYZ\\\\AppData\\\\Local\\\\Programs\\Python\\\\Python38\\\\lib\\\\site-packages\\\\pyqtgraph\\\\colors\\\\maps'\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.12.0\r\n * Qt Python binding: PySide6 6.0.1 Qt 6.0.1\r\n * Python version: 3.8.8\r\n * NumPy version: 1.20.1\r\n * Operating system: Windows\r\n * Installation method: pip\r\n\r\n### Additional context\r\nThe user can more easily trigger this by trying to run the Color Maps example from the ExampleApp GUI.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nDESCRIPTION = \"\"\"\\\nPyQtGraph is a pure-python graphics and GUI library built on PyQt5/PySide2 and\nnumpy. 
\n\nIt is intended for use in mathematics / scientific / engineering applications.\nDespite being written entirely in python, the library is very fast due to its\nheavy leverage of numpy for number crunching, Qt's GraphicsView framework for\n2D display, and OpenGL for 3D display.\n\"\"\"\n\nsetupOpts = dict(\n name='pyqtgraph',\n description='Scientific Graphics and GUI Library for Python',\n long_description=DESCRIPTION,\n license = 'MIT',\n url='http://www.pyqtgraph.org',\n author='Luke Campagnola',\n author_email='[email protected]',\n classifiers = [\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Development Status :: 4 - Beta\",\n \"Environment :: Other Environment\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Scientific/Engineering :: Visualization\",\n \"Topic :: Software Development :: User Interfaces\",\n ],\n)\n\n\nimport distutils.dir_util\nfrom distutils.command import build\nimport os, sys, re\ntry:\n import setuptools\n from setuptools import setup\n from setuptools.command import install\nexcept ImportError:\n sys.stderr.write(\"Warning: could not import setuptools; falling back to distutils.\\n\")\n from distutils.core import setup\n from distutils.command import install\n\n\n# Work around mbcs bug in distutils.\n# http://bugs.python.org/issue10945\nimport codecs\ntry:\n codecs.lookup('mbcs')\nexcept LookupError:\n ascii = codecs.lookup('ascii')\n func = lambda name, enc=ascii: {True: enc}.get(name=='mbcs')\n codecs.register(func)\n\n\npath = os.path.split(__file__)[0]\nsys.path.insert(0, os.path.join(path, 'tools'))\nimport setupHelpers as helpers\n\n## generate list of all sub-packages\nallPackages = (helpers.listAllPackages(pkgroot='pyqtgraph') + \n ['pyqtgraph.'+x for x in helpers.listAllPackages(pkgroot='examples')])\n\n## Decide what version string to use in the build\nversion, forcedVersion, gitVersion, initVersion = helpers.getVersionStrings(pkg='pyqtgraph')\n\n\n\nclass Build(build.build):\n \"\"\"\n * Clear build path before building\n \"\"\"\n def run(self):\n global path\n\n ## Make sure build directory is clean\n buildPath = os.path.join(path, self.build_lib)\n if os.path.isdir(buildPath):\n distutils.dir_util.remove_tree(buildPath)\n \n ret = build.build.run(self)\n \n\nclass Install(install.install):\n \"\"\"\n * Check for previously-installed version before installing\n * Set version string in __init__ after building. 
This helps to ensure that we\n know when an installation came from a non-release code base.\n \"\"\"\n def run(self):\n global path, version, initVersion, forcedVersion, installVersion\n \n name = self.config_vars['dist_name']\n path = os.path.join(self.install_libbase, 'pyqtgraph')\n if os.path.exists(path):\n raise Exception(\"It appears another version of %s is already \"\n \"installed at %s; remove this before installing.\" \n % (name, path))\n print(\"Installing to %s\" % path)\n rval = install.install.run(self)\n\n \n # If the version in __init__ is different from the automatically-generated\n # version string, then we will update __init__ in the install directory\n if initVersion == version:\n return rval\n \n try:\n initfile = os.path.join(path, '__init__.py')\n data = open(initfile, 'r').read()\n open(initfile, 'w').write(re.sub(r\"__version__ = .*\", \"__version__ = '%s'\" % version, data))\n installVersion = version\n except:\n sys.stderr.write(\"Warning: Error occurred while setting version string in build path. \"\n \"Installation will use the original version string \"\n \"%s instead.\\n\" % (initVersion)\n )\n if forcedVersion:\n raise\n installVersion = initVersion\n sys.excepthook(*sys.exc_info())\n \n return rval\n\n\nsetup(\n version=version,\n cmdclass={'build': Build, \n 'install': Install,\n 'deb': helpers.DebCommand, \n 'test': helpers.TestCommand,\n 'debug': helpers.DebugCommand,\n 'mergetest': helpers.MergeTestCommand,\n 'style': helpers.StyleCommand},\n packages=allPackages,\n python_requires=\">=3.7\",\n package_dir={'pyqtgraph.examples': 'examples'}, ## install examples along with the rest of the source\n package_data={'pyqtgraph.examples': ['optics/*.gz', 'relativity/presets/*.cfg'],\n \"pyqtgraph.icons\": [\"*.svg\", \"*.png\"],\n },\n install_requires = [\n 'numpy>=1.17.0',\n ],\n **setupOpts\n)\n", "path": "setup.py"}]} | 2,328 | 133 |
gh_patches_debug_27109 | rasdani/github-patches | git_diff | chainer__chainer-795 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Show more error messages when cython fails
Some users failed to run `cython` and it shows no message (I don't know the reason). In this case, `subprocess.check_call` shows nothing. We need to show an error message in `chainer_setup_build.py`.
</issue>
<code>
[start of chainer_setup_build.py]
1 from __future__ import print_function
2 import copy
3 import distutils
4 import os
5 from os import path
6 import shutil
7 import subprocess
8 import sys
9 import tempfile
10
11 import pkg_resources
12 import setuptools
13 from setuptools.command import build_ext
14
15
16 dummy_extension = setuptools.Extension('chainer', ['chainer.c'])
17
18 cython_version = '0.23.0'
19
20 MODULES = [
21 {
22 'name': 'cuda',
23 'file': [
24 'cupy.core.core',
25 'cupy.core.flags',
26 'cupy.cuda.cublas',
27 'cupy.cuda.curand',
28 'cupy.cuda.device',
29 'cupy.cuda.driver',
30 'cupy.cuda.memory',
31 'cupy.cuda.function',
32 'cupy.cuda.runtime',
33 'cupy.util',
34 ],
35 'include': [
36 'cublas_v2.h',
37 'cuda.h',
38 'cuda_runtime.h',
39 'curand.h',
40 ],
41 'libraries': [
42 'cublas',
43 'cuda',
44 'cudart',
45 'curand',
46 ],
47 },
48 {
49 'name': 'cudnn',
50 'file': [
51 'cupy.cuda.cudnn',
52 ],
53 'include': [
54 'cudnn.h',
55 ],
56 'libraries': [
57 'cudnn',
58 ],
59 }
60 ]
61
62
63 def get_compiler_setting():
64 nvcc_path = search_on_path(('nvcc', 'nvcc.exe'))
65 cuda_path_default = None
66 if nvcc_path is None:
67 print('**************************************************************')
68 print('*** WARNING: nvcc not in path.')
69 print('*** WARNING: Please set path to nvcc.')
70 print('**************************************************************')
71 else:
72 cuda_path_default = path.normpath(
73 path.join(path.dirname(nvcc_path), '..'))
74
75 cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows
76 if len(cuda_path) > 0 and cuda_path != cuda_path_default:
77 print('**************************************************************')
78 print('*** WARNING: nvcc path != CUDA_PATH')
79 print('*** WARNING: nvcc path: %s', cuda_path_default)
80 print('*** WARNING: CUDA_PATH: %s', cuda_path)
81 print('**************************************************************')
82
83 if not path.exists(cuda_path):
84 cuda_path = cuda_path_default
85
86 if not cuda_path and path.exists('/usr/local/cuda'):
87 cuda_path = '/usr/local/cuda'
88
89 include_dirs = []
90 library_dirs = []
91 define_macros = []
92
93 if cuda_path:
94 include_dirs.append(path.join(cuda_path, 'include'))
95 if sys.platform == 'win32':
96 library_dirs.append(path.join(cuda_path, 'bin'))
97 library_dirs.append(path.join(cuda_path, 'lib', 'x64'))
98 else:
99 library_dirs.append(path.join(cuda_path, 'lib64'))
100 library_dirs.append(path.join(cuda_path, 'lib'))
101 if sys.platform == 'darwin':
102 library_dirs.append('/usr/local/cuda/lib')
103
104 return {
105 'include_dirs': include_dirs,
106 'library_dirs': library_dirs,
107 'define_macros': define_macros,
108 'language': 'c++',
109 }
110
111
112 def localpath(*args):
113 return path.abspath(path.join(path.dirname(__file__), *args))
114
115
116 def get_path(key):
117 return os.environ.get(key, '').split(os.pathsep)
118
119
120 def search_on_path(filenames):
121 for p in get_path('PATH'):
122 for filename in filenames:
123 full = path.join(p, filename)
124 if path.exists(full):
125 return path.abspath(full)
126
127
128 def check_include(dirs, file_path):
129 return any(path.exists(path.join(dir, file_path)) for dir in dirs)
130
131
132 def check_readthedocs_environment():
133 return os.environ.get('READTHEDOCS', None) == 'True'
134
135
136 def check_library(compiler, includes=[], libraries=[],
137 include_dirs=[], library_dirs=[]):
138 temp_dir = tempfile.mkdtemp()
139
140 try:
141 source = '''
142 int main(int argc, char* argv[]) {
143 return 0;
144 }
145 '''
146 fname = os.path.join(temp_dir, 'a.cpp')
147 with open(fname, 'w') as f:
148 for header in includes:
149 f.write('#include <%s>\n' % header)
150 f.write(source)
151
152 try:
153 objects = compiler.compile([fname], output_dir=temp_dir,
154 include_dirs=include_dirs)
155 except distutils.errors.CompileError:
156 return False
157
158 try:
159 compiler.link_shared_lib(objects,
160 os.path.join(temp_dir, 'a'),
161 libraries=libraries,
162 library_dirs=library_dirs)
163 except (distutils.errors.LinkError, TypeError):
164 return False
165
166 return True
167
168 finally:
169 shutil.rmtree(temp_dir, ignore_errors=True)
170
171
172 def make_extensions(options, compiler):
173
174 """Produce a list of Extension instances which passed to cythonize()."""
175
176 no_cuda = options['no_cuda']
177 settings = get_compiler_setting()
178
179 try:
180 import numpy
181 numpy_include = numpy.get_include()
182 except AttributeError:
183 # if numpy is not installed get the headers from the .egg directory
184 import numpy.core
185 numpy_include = path.join(
186 path.dirname(numpy.core.__file__), 'include')
187 include_dirs = settings['include_dirs']
188 include_dirs.append(numpy_include)
189
190 settings['include_dirs'] = [
191 x for x in include_dirs if path.exists(x)]
192 settings['library_dirs'] = [
193 x for x in settings['library_dirs'] if path.exists(x)]
194 if sys.platform != 'win32':
195 settings['runtime_library_dirs'] = settings['library_dirs']
196
197 if options['linetrace']:
198 settings['define_macros'].append(('CYTHON_TRACE', '1'))
199 settings['define_macros'].append(('CYTHON_TRACE_NOGIL', '1'))
200 if no_cuda:
201 settings['define_macros'].append(('CUPY_NO_CUDA', '1'))
202
203 ret = []
204 for module in MODULES:
205 print('Include directories:', settings['include_dirs'])
206 print('Library directories:', settings['library_dirs'])
207
208 if not no_cuda:
209 if not check_library(compiler,
210 includes=module['include'],
211 include_dirs=settings['include_dirs']):
212 print('**************************************************')
213 print('*** Include files not found: %s' % module['include'])
214 print('*** Skip installing %s support' % module['name'])
215 print('*** Check your CPATH environment variable')
216 print('**************************************************')
217 continue
218
219 if not check_library(compiler,
220 libraries=module['libraries'],
221 library_dirs=settings['library_dirs']):
222 print('**************************************************')
223 print('*** Cannot link libraries: %s' % module['libraries'])
224 print('*** Skip installing %s support' % module['name'])
225 print('*** Check your LIBRARY_PATH environment variable')
226 print('**************************************************')
227 continue
228
229 s = settings.copy()
230 if not no_cuda:
231 s['libraries'] = module['libraries']
232 ret.extend([
233 setuptools.Extension(f, [path.join(*f.split('.')) + '.pyx'], **s)
234 for f in module['file']])
235 return ret
236
237
238 _arg_options = {}
239
240
241 def parse_args():
242 global _arg_options
243 _arg_options['profile'] = '--cupy-profile' in sys.argv
244 if _arg_options['profile']:
245 sys.argv.remove('--cupy-profile')
246
247 cupy_coverage = '--cupy-coverage' in sys.argv
248 if cupy_coverage:
249 sys.argv.remove('--cupy-coverage')
250 _arg_options['linetrace'] = cupy_coverage
251 _arg_options['annotate'] = cupy_coverage
252
253 _arg_options['no_cuda'] = '--cupy-no-cuda' in sys.argv
254 if _arg_options['no_cuda']:
255 sys.argv.remove('--cupy-no-cuda')
256 if check_readthedocs_environment():
257 _arg_options['no_cuda'] = True
258
259
260 def get_cython_pkg():
261 return pkg_resources.get_distribution('cython')
262
263
264 def cythonize(extensions, force=False, annotate=False, compiler_directives={}):
265 cython_location = get_cython_pkg().location
266 cython_path = path.join(cython_location, 'cython.py')
267 print("cython path:%s" % cython_location)
268 cython_cmdbase = [sys.executable, cython_path]
269 subprocess.check_call(cython_cmdbase + ['--version'])
270
271 cython_cmdbase.extend(['--fast-fail', '--verbose', '--cplus'])
272 for ext in extensions:
273 cmd = list(cython_cmdbase)
274 for i in compiler_directives.items():
275 cmd.append('--directive')
276 cmd.append('%s=%s' % i)
277 subprocess.check_call(cmd + ext.sources)
278
279
280 def to_cpp_extensions(extensions):
281 ret = []
282 for x in extensions:
283 ext = copy.copy(x)
284 ext.sources = [path.splitext(f)[0] + ".cpp" for f in x.sources]
285 ret.append(ext)
286 return ret
287
288
289 def check_extensions(extensions):
290 for x in extensions:
291 for f in x.sources:
292 if not path.isfile(f):
293 msg = ('Missing file: %s\n' % f +
294 'Please install Cython.\n' +
295 'See http://docs.chainer.org/en/stable/install.html')
296 raise RuntimeError(msg)
297
298
299 class chainer_build_ext(build_ext.build_ext):
300
301 """`build_ext` command for cython files."""
302
303 def finalize_options(self):
304 ext_modules = self.distribution.ext_modules
305 if dummy_extension in ext_modules:
306 print('Executing cythonize')
307 print('Options:', _arg_options)
308
309 directive_keys = ('linetrace', 'profile')
310 directives = {key: _arg_options[key] for key in directive_keys}
311
312 cythonize_option_keys = ('annotate',)
313 cythonize_options = {
314 key: _arg_options[key] for key in cythonize_option_keys}
315
316 compiler = distutils.ccompiler.new_compiler(self.compiler)
317 distutils.sysconfig.customize_compiler(compiler)
318
319 extensions = make_extensions(_arg_options, compiler)
320
321 cython = get_cython_pkg()
322 req_version = pkg_resources.parse_version(cython_version)
323 if cython is not None and cython.parsed_version > req_version:
324 cythonize(extensions, force=True,
325 compiler_directives=directives, **cythonize_options)
326
327 extensions = to_cpp_extensions(extensions)
328 check_extensions(extensions)
329
330 # Modify ext_modules for cython
331 ext_modules.remove(dummy_extension)
332 ext_modules.extend(extensions)
333
334 build_ext.build_ext.finalize_options(self)
335
[end of chainer_setup_build.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/chainer_setup_build.py b/chainer_setup_build.py
--- a/chainer_setup_build.py
+++ b/chainer_setup_build.py
@@ -261,12 +261,26 @@
return pkg_resources.get_distribution('cython')
+def run_command(cmd):
+ try:
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as e:
+ msg = '''Command %r failed:
+
+ command: %s
+ return code: %d
+ output:
+
+%s''' % (cmd[0], ' '.join(e.cmd), e.returncode, e.output)
+ raise distutils.errors.DistutilsExecError(msg)
+
+
def cythonize(extensions, force=False, annotate=False, compiler_directives={}):
cython_location = get_cython_pkg().location
cython_path = path.join(cython_location, 'cython.py')
print("cython path:%s" % cython_location)
cython_cmdbase = [sys.executable, cython_path]
- subprocess.check_call(cython_cmdbase + ['--version'])
+ run_command(cython_cmdbase + ['--version'])
cython_cmdbase.extend(['--fast-fail', '--verbose', '--cplus'])
for ext in extensions:
@@ -274,7 +288,7 @@
for i in compiler_directives.items():
cmd.append('--directive')
cmd.append('%s=%s' % i)
- subprocess.check_call(cmd + ext.sources)
+ run_command(cmd + ext.sources)
def to_cpp_extensions(extensions):
| {"golden_diff": "diff --git a/chainer_setup_build.py b/chainer_setup_build.py\n--- a/chainer_setup_build.py\n+++ b/chainer_setup_build.py\n@@ -261,12 +261,26 @@\n return pkg_resources.get_distribution('cython')\n \n \n+def run_command(cmd):\n+ try:\n+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)\n+ except subprocess.CalledProcessError as e:\n+ msg = '''Command %r failed:\n+\n+ command: %s\n+ return code: %d\n+ output:\n+\n+%s''' % (cmd[0], ' '.join(e.cmd), e.returncode, e.output)\n+ raise distutils.errors.DistutilsExecError(msg)\n+\n+\n def cythonize(extensions, force=False, annotate=False, compiler_directives={}):\n cython_location = get_cython_pkg().location\n cython_path = path.join(cython_location, 'cython.py')\n print(\"cython path:%s\" % cython_location)\n cython_cmdbase = [sys.executable, cython_path]\n- subprocess.check_call(cython_cmdbase + ['--version'])\n+ run_command(cython_cmdbase + ['--version'])\n \n cython_cmdbase.extend(['--fast-fail', '--verbose', '--cplus'])\n for ext in extensions:\n@@ -274,7 +288,7 @@\n for i in compiler_directives.items():\n cmd.append('--directive')\n cmd.append('%s=%s' % i)\n- subprocess.check_call(cmd + ext.sources)\n+ run_command(cmd + ext.sources)\n \n \n def to_cpp_extensions(extensions):\n", "issue": "Show more error messages when cython fails\nSome users failed to run `cython` and it shows no message (I don't know the reason). In this case, `subprocess.check_call` shows nothing. We need to show an error message in `chainer_setup_build.py`.\n\n", "before_files": [{"content": "from __future__ import print_function\nimport copy\nimport distutils\nimport os\nfrom os import path\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\n\nimport pkg_resources\nimport setuptools\nfrom setuptools.command import build_ext\n\n\ndummy_extension = setuptools.Extension('chainer', ['chainer.c'])\n\ncython_version = '0.23.0'\n\nMODULES = [\n {\n 'name': 'cuda',\n 'file': [\n 'cupy.core.core',\n 'cupy.core.flags',\n 'cupy.cuda.cublas',\n 'cupy.cuda.curand',\n 'cupy.cuda.device',\n 'cupy.cuda.driver',\n 'cupy.cuda.memory',\n 'cupy.cuda.function',\n 'cupy.cuda.runtime',\n 'cupy.util',\n ],\n 'include': [\n 'cublas_v2.h',\n 'cuda.h',\n 'cuda_runtime.h',\n 'curand.h',\n ],\n 'libraries': [\n 'cublas',\n 'cuda',\n 'cudart',\n 'curand',\n ],\n },\n {\n 'name': 'cudnn',\n 'file': [\n 'cupy.cuda.cudnn',\n ],\n 'include': [\n 'cudnn.h',\n ],\n 'libraries': [\n 'cudnn',\n ],\n }\n]\n\n\ndef get_compiler_setting():\n nvcc_path = search_on_path(('nvcc', 'nvcc.exe'))\n cuda_path_default = None\n if nvcc_path is None:\n print('**************************************************************')\n print('*** WARNING: nvcc not in path.')\n print('*** WARNING: Please set path to nvcc.')\n print('**************************************************************')\n else:\n cuda_path_default = path.normpath(\n path.join(path.dirname(nvcc_path), '..'))\n\n cuda_path = os.environ.get('CUDA_PATH', '') # Nvidia default on Windows\n if len(cuda_path) > 0 and cuda_path != cuda_path_default:\n print('**************************************************************')\n print('*** WARNING: nvcc path != CUDA_PATH')\n print('*** WARNING: nvcc path: %s', cuda_path_default)\n print('*** WARNING: CUDA_PATH: %s', cuda_path)\n print('**************************************************************')\n\n if not path.exists(cuda_path):\n cuda_path = cuda_path_default\n\n if not cuda_path and path.exists('/usr/local/cuda'):\n cuda_path = '/usr/local/cuda'\n\n include_dirs = []\n 
library_dirs = []\n define_macros = []\n\n if cuda_path:\n include_dirs.append(path.join(cuda_path, 'include'))\n if sys.platform == 'win32':\n library_dirs.append(path.join(cuda_path, 'bin'))\n library_dirs.append(path.join(cuda_path, 'lib', 'x64'))\n else:\n library_dirs.append(path.join(cuda_path, 'lib64'))\n library_dirs.append(path.join(cuda_path, 'lib'))\n if sys.platform == 'darwin':\n library_dirs.append('/usr/local/cuda/lib')\n\n return {\n 'include_dirs': include_dirs,\n 'library_dirs': library_dirs,\n 'define_macros': define_macros,\n 'language': 'c++',\n }\n\n\ndef localpath(*args):\n return path.abspath(path.join(path.dirname(__file__), *args))\n\n\ndef get_path(key):\n return os.environ.get(key, '').split(os.pathsep)\n\n\ndef search_on_path(filenames):\n for p in get_path('PATH'):\n for filename in filenames:\n full = path.join(p, filename)\n if path.exists(full):\n return path.abspath(full)\n\n\ndef check_include(dirs, file_path):\n return any(path.exists(path.join(dir, file_path)) for dir in dirs)\n\n\ndef check_readthedocs_environment():\n return os.environ.get('READTHEDOCS', None) == 'True'\n\n\ndef check_library(compiler, includes=[], libraries=[],\n include_dirs=[], library_dirs=[]):\n temp_dir = tempfile.mkdtemp()\n\n try:\n source = '''\n int main(int argc, char* argv[]) {\n return 0;\n }\n '''\n fname = os.path.join(temp_dir, 'a.cpp')\n with open(fname, 'w') as f:\n for header in includes:\n f.write('#include <%s>\\n' % header)\n f.write(source)\n\n try:\n objects = compiler.compile([fname], output_dir=temp_dir,\n include_dirs=include_dirs)\n except distutils.errors.CompileError:\n return False\n\n try:\n compiler.link_shared_lib(objects,\n os.path.join(temp_dir, 'a'),\n libraries=libraries,\n library_dirs=library_dirs)\n except (distutils.errors.LinkError, TypeError):\n return False\n\n return True\n\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n\n\ndef make_extensions(options, compiler):\n\n \"\"\"Produce a list of Extension instances which passed to cythonize().\"\"\"\n\n no_cuda = options['no_cuda']\n settings = get_compiler_setting()\n\n try:\n import numpy\n numpy_include = numpy.get_include()\n except AttributeError:\n # if numpy is not installed get the headers from the .egg directory\n import numpy.core\n numpy_include = path.join(\n path.dirname(numpy.core.__file__), 'include')\n include_dirs = settings['include_dirs']\n include_dirs.append(numpy_include)\n\n settings['include_dirs'] = [\n x for x in include_dirs if path.exists(x)]\n settings['library_dirs'] = [\n x for x in settings['library_dirs'] if path.exists(x)]\n if sys.platform != 'win32':\n settings['runtime_library_dirs'] = settings['library_dirs']\n\n if options['linetrace']:\n settings['define_macros'].append(('CYTHON_TRACE', '1'))\n settings['define_macros'].append(('CYTHON_TRACE_NOGIL', '1'))\n if no_cuda:\n settings['define_macros'].append(('CUPY_NO_CUDA', '1'))\n\n ret = []\n for module in MODULES:\n print('Include directories:', settings['include_dirs'])\n print('Library directories:', settings['library_dirs'])\n\n if not no_cuda:\n if not check_library(compiler,\n includes=module['include'],\n include_dirs=settings['include_dirs']):\n print('**************************************************')\n print('*** Include files not found: %s' % module['include'])\n print('*** Skip installing %s support' % module['name'])\n print('*** Check your CPATH environment variable')\n print('**************************************************')\n continue\n\n if not check_library(compiler,\n 
libraries=module['libraries'],\n library_dirs=settings['library_dirs']):\n print('**************************************************')\n print('*** Cannot link libraries: %s' % module['libraries'])\n print('*** Skip installing %s support' % module['name'])\n print('*** Check your LIBRARY_PATH environment variable')\n print('**************************************************')\n continue\n\n s = settings.copy()\n if not no_cuda:\n s['libraries'] = module['libraries']\n ret.extend([\n setuptools.Extension(f, [path.join(*f.split('.')) + '.pyx'], **s)\n for f in module['file']])\n return ret\n\n\n_arg_options = {}\n\n\ndef parse_args():\n global _arg_options\n _arg_options['profile'] = '--cupy-profile' in sys.argv\n if _arg_options['profile']:\n sys.argv.remove('--cupy-profile')\n\n cupy_coverage = '--cupy-coverage' in sys.argv\n if cupy_coverage:\n sys.argv.remove('--cupy-coverage')\n _arg_options['linetrace'] = cupy_coverage\n _arg_options['annotate'] = cupy_coverage\n\n _arg_options['no_cuda'] = '--cupy-no-cuda' in sys.argv\n if _arg_options['no_cuda']:\n sys.argv.remove('--cupy-no-cuda')\n if check_readthedocs_environment():\n _arg_options['no_cuda'] = True\n\n\ndef get_cython_pkg():\n return pkg_resources.get_distribution('cython')\n\n\ndef cythonize(extensions, force=False, annotate=False, compiler_directives={}):\n cython_location = get_cython_pkg().location\n cython_path = path.join(cython_location, 'cython.py')\n print(\"cython path:%s\" % cython_location)\n cython_cmdbase = [sys.executable, cython_path]\n subprocess.check_call(cython_cmdbase + ['--version'])\n\n cython_cmdbase.extend(['--fast-fail', '--verbose', '--cplus'])\n for ext in extensions:\n cmd = list(cython_cmdbase)\n for i in compiler_directives.items():\n cmd.append('--directive')\n cmd.append('%s=%s' % i)\n subprocess.check_call(cmd + ext.sources)\n\n\ndef to_cpp_extensions(extensions):\n ret = []\n for x in extensions:\n ext = copy.copy(x)\n ext.sources = [path.splitext(f)[0] + \".cpp\" for f in x.sources]\n ret.append(ext)\n return ret\n\n\ndef check_extensions(extensions):\n for x in extensions:\n for f in x.sources:\n if not path.isfile(f):\n msg = ('Missing file: %s\\n' % f +\n 'Please install Cython.\\n' +\n 'See http://docs.chainer.org/en/stable/install.html')\n raise RuntimeError(msg)\n\n\nclass chainer_build_ext(build_ext.build_ext):\n\n \"\"\"`build_ext` command for cython files.\"\"\"\n\n def finalize_options(self):\n ext_modules = self.distribution.ext_modules\n if dummy_extension in ext_modules:\n print('Executing cythonize')\n print('Options:', _arg_options)\n\n directive_keys = ('linetrace', 'profile')\n directives = {key: _arg_options[key] for key in directive_keys}\n\n cythonize_option_keys = ('annotate',)\n cythonize_options = {\n key: _arg_options[key] for key in cythonize_option_keys}\n\n compiler = distutils.ccompiler.new_compiler(self.compiler)\n distutils.sysconfig.customize_compiler(compiler)\n\n extensions = make_extensions(_arg_options, compiler)\n\n cython = get_cython_pkg()\n req_version = pkg_resources.parse_version(cython_version)\n if cython is not None and cython.parsed_version > req_version:\n cythonize(extensions, force=True,\n compiler_directives=directives, **cythonize_options)\n\n extensions = to_cpp_extensions(extensions)\n check_extensions(extensions)\n\n # Modify ext_modules for cython\n ext_modules.remove(dummy_extension)\n ext_modules.extend(extensions)\n\n build_ext.build_ext.finalize_options(self)\n", "path": "chainer_setup_build.py"}]} | 3,806 | 360 |
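As an illustrative aside on the row above (not part of the dataset row itself): the accepted fix in that row's diff wraps the cython subprocess call so that its captured output is reported when the command fails. Below is a minimal, self-contained sketch of that pattern in plain Python; the helper name, error message format, and the `python --version` example are assumptions for illustration, not the exact chainer code.

```python
# Hypothetical sketch: run a command and surface its output when it fails,
# similar in spirit to the run_command() helper in the diff above.
import subprocess


def run_command(cmd):
    """Run *cmd* and return its output; raise a readable error (with output) on failure."""
    try:
        # Capture stdout and stderr together so a failing command is not silent.
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as exc:
        raise RuntimeError(
            "Command %r failed with return code %d:\n%s"
            % (" ".join(exc.cmd), exc.returncode, exc.output.decode(errors="replace"))
        )


if __name__ == "__main__":
    # Assumes a `python` executable is available on PATH.
    print(run_command(["python", "--version"]).decode())
```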
gh_patches_debug_12239 | rasdani/github-patches | git_diff | spyder-ide__spyder-9436 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Only require PyWebEngine on Python 3+ to avoid blocking py2 compat in pip wheels
<!--- **PLEASE READ:** When submitting here, please ensure you've completed the following checklist and checked the boxes to confirm. Issue reports without it may be closed. Thanks! --->
## Problem Description
Currently, Spyder 3.3.4 cannot be installed on Python 2.7.x via ``pip``, because it requires ``PyQtWebEngine``, which is not available for Python 2. However, this requirement is unnecessary: PyQt 5.12 (which needs it) isn't compatible with Python 2 anyway, so it blocks Python 2 compatibility for no reason. Therefore, per @ccordoba12, we should not require ``PyQtWebEngine`` on Python 2.
## Versions
<!--- You can get this information from Help > About Spyder...
or (if Spyder won't launch) the "conda list" command
from the Anaconda Prompt/Terminal/command line. --->
* Spyder version: 3.3.4
* Python version: 2.7.x
* Qt version: <5.12
* PyQt version: <5.12
* Operating System name/version: Any, but mostly Linux
</issue>
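Aside (not part of the original issue or repository): the change being requested relies on PEP 508 environment markers, which make pip resolve a requirement only under Python 3. A minimal, hypothetical `setup.py` fragment follows; the package names, versions, and project metadata are illustrative, and the marker syntax assumes a reasonably recent setuptools/pip.

```python
# Hypothetical setup.py fragment: environment markers restrict some
# requirements to Python 3 only; names and versions are illustrative.
from setuptools import setup

setup(
    name="example",
    version="0.1",
    install_requires=[
        # Installed everywhere.
        "qtpy>=1.5.0",
        # Only resolved by pip when running under Python 3.
        'pyqt5<5.13; python_version >= "3"',
        'pyqtwebengine<5.13; python_version >= "3"',
    ],
)
```

With markers like these, `pip install` on Python 2.7 simply skips the marked requirements instead of failing.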
<code>
[start of setup.py]
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright © Spyder Project Contributors
4 # Licensed under the terms of the MIT License
5 # (see spyder/__init__.py for details)
6
7 """
8 Spyder
9 ======
10
11 The Scientific Python Development Environment
12
13 Spyder is a powerful scientific environment written in Python, for Python,
14 and designed by and for scientists, engineers and data analysts.
15
16 It features a unique combination of the advanced editing, analysis, debugging
17 and profiling functionality of a comprehensive development tool with the data
18 exploration, interactive execution, deep inspection and beautiful visualization
19 capabilities of a scientific package.
20 """
21
22 from __future__ import print_function
23
24 import io
25 import os
26 import os.path as osp
27 import subprocess
28 import sys
29 import shutil
30
31 from distutils.core import setup
32 from distutils.command.install_data import install_data
33
34
35 #==============================================================================
36 # Check for Python 3
37 #==============================================================================
38 PY3 = sys.version_info[0] == 3
39
40
41 #==============================================================================
42 # Minimal Python version sanity check
43 # Taken from the notebook setup.py -- Modified BSD License
44 #==============================================================================
45 v = sys.version_info
46 if v[:2] < (2, 7) or (v[0] >= 3 and v[:2] < (3, 4)):
47 error = "ERROR: Spyder requires Python version 2.7 or 3.4 and above."
48 print(error, file=sys.stderr)
49 sys.exit(1)
50
51
52 #==============================================================================
53 # Constants
54 #==============================================================================
55 NAME = 'spyder'
56 LIBNAME = 'spyder'
57 from spyder import __version__, __website_url__ #analysis:ignore
58
59
60 #==============================================================================
61 # Auxiliary functions
62 #==============================================================================
63 def get_package_data(name, extlist):
64 """Return data files for package *name* with extensions in *extlist*"""
65 flist = []
66 # Workaround to replace os.path.relpath (not available until Python 2.6):
67 offset = len(name)+len(os.pathsep)
68 for dirpath, _dirnames, filenames in os.walk(name):
69 for fname in filenames:
70 if not fname.startswith('.') and osp.splitext(fname)[1] in extlist:
71 flist.append(osp.join(dirpath, fname)[offset:])
72 return flist
73
74
75 def get_subpackages(name):
76 """Return subpackages of package *name*"""
77 splist = []
78 for dirpath, _dirnames, _filenames in os.walk(name):
79 if osp.isfile(osp.join(dirpath, '__init__.py')):
80 splist.append(".".join(dirpath.split(os.sep)))
81 return splist
82
83
84 def get_data_files():
85 """Return data_files in a platform dependent manner"""
86 if sys.platform.startswith('linux'):
87 if PY3:
88 data_files = [('share/applications', ['scripts/spyder3.desktop']),
89 ('share/icons', ['img_src/spyder3.png']),
90 ('share/metainfo', ['scripts/spyder3.appdata.xml'])]
91 else:
92 data_files = [('share/applications', ['scripts/spyder.desktop']),
93 ('share/icons', ['img_src/spyder.png'])]
94 elif os.name == 'nt':
95 data_files = [('scripts', ['img_src/spyder.ico',
96 'img_src/spyder_reset.ico'])]
97 else:
98 data_files = []
99 return data_files
100
101
102 def get_packages():
103 """Return package list"""
104 packages = (
105 get_subpackages(LIBNAME)
106 + get_subpackages('spyder_breakpoints')
107 + get_subpackages('spyder_profiler')
108 + get_subpackages('spyder_pylint')
109 + get_subpackages('spyder_io_dcm')
110 + get_subpackages('spyder_io_hdf5')
111 )
112 return packages
113
114
115 #==============================================================================
116 # Make Linux detect Spyder desktop file
117 #==============================================================================
118 class MyInstallData(install_data):
119 def run(self):
120 install_data.run(self)
121 if sys.platform.startswith('linux'):
122 try:
123 subprocess.call(['update-desktop-database'])
124 except:
125 print("ERROR: unable to update desktop database",
126 file=sys.stderr)
127 CMDCLASS = {'install_data': MyInstallData}
128
129
130 #==============================================================================
131 # Main scripts
132 #==============================================================================
133 # NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows
134 # platforms due to a bug in pip installation process (see Issue 1158)
135 SCRIPTS = ['%s_win_post_install.py' % NAME]
136 if PY3 and sys.platform.startswith('linux'):
137 SCRIPTS.append('spyder3')
138 else:
139 SCRIPTS.append('spyder')
140
141
142 #==============================================================================
143 # Files added to the package
144 #==============================================================================
145 EXTLIST = ['.mo', '.svg', '.png', '.css', '.html', '.js', '.chm', '.ini',
146 '.txt', '.rst', '.qss', '.ttf', '.json', '.c', '.cpp', '.java',
147 '.md', '.R', '.csv', '.pyx', '.ipynb', '.xml']
148 if os.name == 'nt':
149 SCRIPTS += ['spyder.bat']
150 EXTLIST += ['.ico']
151
152
153 #==============================================================================
154 # Use Readme for long description
155 #==============================================================================
156 with io.open('README.md', encoding='utf-8') as f:
157 LONG_DESCRIPTION = f.read()
158
159
160 #==============================================================================
161 # Setup arguments
162 #==============================================================================
163 setup_args = dict(
164 name=NAME,
165 version=__version__,
166 description='The Scientific Python Development Environment',
167 long_description=LONG_DESCRIPTION,
168 long_description_content_type='text/markdown',
169 download_url=__website_url__ + "#fh5co-download",
170 author="The Spyder Project Contributors",
171 author_email="[email protected]",
172 url=__website_url__,
173 license='MIT',
174 keywords='PyQt5 editor console widgets IDE science data analysis IPython',
175 platforms=["Windows", "Linux", "Mac OS-X"],
176 packages=get_packages(),
177 package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST),
178 'spyder_breakpoints': get_package_data('spyder_breakpoints',
179 EXTLIST),
180 'spyder_profiler': get_package_data('spyder_profiler',
181 EXTLIST),
182 'spyder_pylint': get_package_data('spyder_pylint',
183 EXTLIST),
184 'spyder_io_dcm': get_package_data('spyder_io_dcm',
185 EXTLIST),
186 'spyder_io_hdf5': get_package_data('spyder_io_hdf5',
187 EXTLIST),
188 },
189 scripts=[osp.join('scripts', fname) for fname in SCRIPTS],
190 data_files=get_data_files(),
191 classifiers=['License :: OSI Approved :: MIT License',
192 'Operating System :: MacOS',
193 'Operating System :: Microsoft :: Windows',
194 'Operating System :: POSIX :: Linux',
195 'Programming Language :: Python :: 2',
196 'Programming Language :: Python :: 2.7',
197 'Programming Language :: Python :: 3',
198 'Programming Language :: Python :: 3.4',
199 'Programming Language :: Python :: 3.5',
200 'Programming Language :: Python :: 3.6',
201 'Programming Language :: Python :: 3.7',
202 'Development Status :: 5 - Production/Stable',
203 'Intended Audience :: Education',
204 'Intended Audience :: Science/Research',
205 'Intended Audience :: Developers',
206 'Topic :: Scientific/Engineering',
207 'Topic :: Software Development :: Widget Sets'],
208 cmdclass=CMDCLASS)
209
210
211 #==============================================================================
212 # Setuptools deps
213 #==============================================================================
214 if any(arg == 'bdist_wheel' for arg in sys.argv):
215 import setuptools # analysis:ignore
216
217 install_requires = [
218 'cloudpickle',
219 'rope>=0.10.5',
220 'jedi>=0.9.0',
221 'pyflakes',
222 'pygments>=2.0',
223 'qtconsole>=4.2.0',
224 'nbconvert',
225 'sphinx',
226 'pycodestyle',
227 'pylint',
228 'psutil',
229 'qtawesome>=0.5.7',
230 'qtpy>=1.5.0',
231 'pickleshare',
232 'pyzmq',
233 'chardet>=2.0.0',
234 'numpydoc',
235 'spyder-kernels>=0.4.3,<1.0',
236 # Don't require keyring for Python 2 and Linux
237 # because it depends on system packages
238 'keyring;sys_platform!="linux2"',
239 # Packages for pyqt5 are only available in
240 # Python 3
241 'pyqt5<5.13;python_version>="3"',
242 # pyqt5 5.12 split WebEngine into the
243 # pyqtwebengine module
244 'pyqtwebengine<5.13'
245 ]
246
247 extras_require = {
248 'test:python_version == "2.7"': ['mock'],
249 'test': ['pytest<4.1',
250 'pytest-qt',
251 'pytest-mock',
252 'pytest-cov',
253 'pytest-xvfb',
254 'mock',
255 'flaky',
256 'pandas',
257 'scipy',
258 'sympy',
259 'pillow',
260 'matplotlib',
261 'cython'],
262 }
263
264 if 'setuptools' in sys.modules:
265 setup_args['install_requires'] = install_requires
266 setup_args['extras_require'] = extras_require
267
268 setup_args['entry_points'] = {
269 'gui_scripts': [
270 '{} = spyder.app.start:main'.format(
271 'spyder3' if PY3 else 'spyder')
272 ]
273 }
274
275 setup_args.pop('scripts', None)
276
277
278 #==============================================================================
279 # Main setup
280 #==============================================================================
281 setup(**setup_args)
282
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -241,7 +241,7 @@
'pyqt5<5.13;python_version>="3"',
# pyqt5 5.12 split WebEngine into the
# pyqtwebengine module
- 'pyqtwebengine<5.13'
+ 'pyqtwebengine<5.13;python_version>="3"'
]
extras_require = {
@@ -250,7 +250,7 @@
'pytest-qt',
'pytest-mock',
'pytest-cov',
- 'pytest-xvfb',
+ 'pytest-xvfb;platform_system=="Linux"',
'mock',
'flaky',
'pandas',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -241,7 +241,7 @@\n 'pyqt5<5.13;python_version>=\"3\"',\n # pyqt5 5.12 split WebEngine into the\n # pyqtwebengine module\n- 'pyqtwebengine<5.13'\n+ 'pyqtwebengine<5.13;python_version>=\"3\"'\n ]\n \n extras_require = {\n@@ -250,7 +250,7 @@\n 'pytest-qt',\n 'pytest-mock',\n 'pytest-cov',\n- 'pytest-xvfb',\n+ 'pytest-xvfb;platform_system==\"Linux\"',\n 'mock',\n 'flaky',\n 'pandas',\n", "issue": "Only require PyWebEngine on Python 3+ to avoid blocking py2 compat in pip wheels\n<!--- **PLEASE READ:** When submitting here, please ensure you've completed the following checklist and checked the boxes to confirm. Issue reports without it may be closed. Thanks! --->\r\n\r\n## Problem Description\r\n\r\nCurrently, Spyder 3.3.4 cannot be installed on Python 2.7.x via ``pip``, because it requires ``PyQtWebEngine`` which is not available for Python 2. However, this is not required, because PyQt 5.12 (which requires it) isn't compatible with Python 2 anyway, so this blocks Python 2 compat unnecessarily. Therefore, per @ccordoba12 's , we should not require ``PyQtWebEngine`` on Python 2.\r\n\r\n\r\n## Versions\r\n<!--- You can get this information from Help > About Spyder...\r\nor (if Spyder won't launch) the \"conda list\" command\r\nfrom the Anaconda Prompt/Terminal/command line. --->\r\n\r\n* Spyder version: 3.3.4\r\n* Python version: 2.7.x\r\n* Qt version: <5.12 \r\n* PyQt version: <5.12\r\n* Operating System name/version: Any, but mostly Linux\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright \u00a9 Spyder Project Contributors\n# Licensed under the terms of the MIT License\n# (see spyder/__init__.py for details)\n\n\"\"\"\nSpyder\n======\n\nThe Scientific Python Development Environment\n\nSpyder is a powerful scientific environment written in Python, for Python,\nand designed by and for scientists, engineers and data analysts.\n\nIt features a unique combination of the advanced editing, analysis, debugging\nand profiling functionality of a comprehensive development tool with the data\nexploration, interactive execution, deep inspection and beautiful visualization\ncapabilities of a scientific package.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport io\nimport os\nimport os.path as osp\nimport subprocess\nimport sys\nimport shutil\n\nfrom distutils.core import setup\nfrom distutils.command.install_data import install_data\n\n\n#==============================================================================\n# Check for Python 3\n#==============================================================================\nPY3 = sys.version_info[0] == 3\n\n\n#==============================================================================\n# Minimal Python version sanity check\n# Taken from the notebook setup.py -- Modified BSD License\n#==============================================================================\nv = sys.version_info\nif v[:2] < (2, 7) or (v[0] >= 3 and v[:2] < (3, 4)):\n error = \"ERROR: Spyder requires Python version 2.7 or 3.4 and above.\"\n print(error, file=sys.stderr)\n sys.exit(1)\n\n\n#==============================================================================\n# Constants\n#==============================================================================\nNAME = 'spyder'\nLIBNAME = 'spyder'\nfrom spyder import __version__, __website_url__ #analysis:ignore\n\n\n#==============================================================================\n# Auxiliary 
functions\n#==============================================================================\ndef get_package_data(name, extlist):\n \"\"\"Return data files for package *name* with extensions in *extlist*\"\"\"\n flist = []\n # Workaround to replace os.path.relpath (not available until Python 2.6):\n offset = len(name)+len(os.pathsep)\n for dirpath, _dirnames, filenames in os.walk(name):\n for fname in filenames:\n if not fname.startswith('.') and osp.splitext(fname)[1] in extlist:\n flist.append(osp.join(dirpath, fname)[offset:])\n return flist\n\n\ndef get_subpackages(name):\n \"\"\"Return subpackages of package *name*\"\"\"\n splist = []\n for dirpath, _dirnames, _filenames in os.walk(name):\n if osp.isfile(osp.join(dirpath, '__init__.py')):\n splist.append(\".\".join(dirpath.split(os.sep)))\n return splist\n\n\ndef get_data_files():\n \"\"\"Return data_files in a platform dependent manner\"\"\"\n if sys.platform.startswith('linux'):\n if PY3:\n data_files = [('share/applications', ['scripts/spyder3.desktop']),\n ('share/icons', ['img_src/spyder3.png']),\n ('share/metainfo', ['scripts/spyder3.appdata.xml'])]\n else:\n data_files = [('share/applications', ['scripts/spyder.desktop']),\n ('share/icons', ['img_src/spyder.png'])]\n elif os.name == 'nt':\n data_files = [('scripts', ['img_src/spyder.ico',\n 'img_src/spyder_reset.ico'])]\n else:\n data_files = []\n return data_files\n\n\ndef get_packages():\n \"\"\"Return package list\"\"\"\n packages = (\n get_subpackages(LIBNAME)\n + get_subpackages('spyder_breakpoints')\n + get_subpackages('spyder_profiler')\n + get_subpackages('spyder_pylint')\n + get_subpackages('spyder_io_dcm')\n + get_subpackages('spyder_io_hdf5')\n )\n return packages\n\n\n#==============================================================================\n# Make Linux detect Spyder desktop file\n#==============================================================================\nclass MyInstallData(install_data):\n def run(self):\n install_data.run(self)\n if sys.platform.startswith('linux'):\n try:\n subprocess.call(['update-desktop-database'])\n except:\n print(\"ERROR: unable to update desktop database\",\n file=sys.stderr)\nCMDCLASS = {'install_data': MyInstallData}\n\n\n#==============================================================================\n# Main scripts\n#==============================================================================\n# NOTE: the '[...]_win_post_install.py' script is installed even on non-Windows\n# platforms due to a bug in pip installation process (see Issue 1158)\nSCRIPTS = ['%s_win_post_install.py' % NAME]\nif PY3 and sys.platform.startswith('linux'):\n SCRIPTS.append('spyder3')\nelse:\n SCRIPTS.append('spyder')\n\n\n#==============================================================================\n# Files added to the package\n#==============================================================================\nEXTLIST = ['.mo', '.svg', '.png', '.css', '.html', '.js', '.chm', '.ini',\n '.txt', '.rst', '.qss', '.ttf', '.json', '.c', '.cpp', '.java',\n '.md', '.R', '.csv', '.pyx', '.ipynb', '.xml']\nif os.name == 'nt':\n SCRIPTS += ['spyder.bat']\n EXTLIST += ['.ico']\n\n\n#==============================================================================\n# Use Readme for long description\n#==============================================================================\nwith io.open('README.md', encoding='utf-8') as f:\n LONG_DESCRIPTION = f.read()\n\n\n#==============================================================================\n# Setup 
arguments\n#==============================================================================\nsetup_args = dict(\n name=NAME,\n version=__version__,\n description='The Scientific Python Development Environment',\n long_description=LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n download_url=__website_url__ + \"#fh5co-download\",\n author=\"The Spyder Project Contributors\",\n author_email=\"[email protected]\",\n url=__website_url__,\n license='MIT',\n keywords='PyQt5 editor console widgets IDE science data analysis IPython',\n platforms=[\"Windows\", \"Linux\", \"Mac OS-X\"],\n packages=get_packages(),\n package_data={LIBNAME: get_package_data(LIBNAME, EXTLIST),\n 'spyder_breakpoints': get_package_data('spyder_breakpoints',\n EXTLIST),\n 'spyder_profiler': get_package_data('spyder_profiler',\n EXTLIST),\n 'spyder_pylint': get_package_data('spyder_pylint',\n EXTLIST),\n 'spyder_io_dcm': get_package_data('spyder_io_dcm',\n EXTLIST),\n 'spyder_io_hdf5': get_package_data('spyder_io_hdf5',\n EXTLIST),\n },\n scripts=[osp.join('scripts', fname) for fname in SCRIPTS],\n data_files=get_data_files(),\n classifiers=['License :: OSI Approved :: MIT License',\n 'Operating System :: MacOS',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Software Development :: Widget Sets'],\n cmdclass=CMDCLASS)\n\n\n#==============================================================================\n# Setuptools deps\n#==============================================================================\nif any(arg == 'bdist_wheel' for arg in sys.argv):\n import setuptools # analysis:ignore\n\ninstall_requires = [\n 'cloudpickle',\n 'rope>=0.10.5',\n 'jedi>=0.9.0',\n 'pyflakes',\n 'pygments>=2.0',\n 'qtconsole>=4.2.0',\n 'nbconvert',\n 'sphinx',\n 'pycodestyle',\n 'pylint',\n 'psutil',\n 'qtawesome>=0.5.7',\n 'qtpy>=1.5.0',\n 'pickleshare',\n 'pyzmq',\n 'chardet>=2.0.0',\n 'numpydoc',\n 'spyder-kernels>=0.4.3,<1.0',\n # Don't require keyring for Python 2 and Linux\n # because it depends on system packages\n 'keyring;sys_platform!=\"linux2\"',\n # Packages for pyqt5 are only available in\n # Python 3\n 'pyqt5<5.13;python_version>=\"3\"',\n # pyqt5 5.12 split WebEngine into the\n # pyqtwebengine module\n 'pyqtwebengine<5.13'\n]\n\nextras_require = {\n 'test:python_version == \"2.7\"': ['mock'],\n 'test': ['pytest<4.1',\n 'pytest-qt',\n 'pytest-mock',\n 'pytest-cov',\n 'pytest-xvfb',\n 'mock',\n 'flaky',\n 'pandas',\n 'scipy',\n 'sympy',\n 'pillow',\n 'matplotlib',\n 'cython'],\n}\n\nif 'setuptools' in sys.modules:\n setup_args['install_requires'] = install_requires\n setup_args['extras_require'] = extras_require\n\n setup_args['entry_points'] = {\n 'gui_scripts': [\n '{} = spyder.app.start:main'.format(\n 'spyder3' if PY3 else 'spyder')\n ]\n }\n\n setup_args.pop('scripts', None)\n\n\n#==============================================================================\n# Main 
setup\n#==============================================================================\nsetup(**setup_args)\n", "path": "setup.py"}]} | 3,641 | 180 |
gh_patches_debug_15385 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-622 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Still get notifications for follows from blocked user
**Describe the bug**
I blocked https://bookwyrm.social/user/[email protected], but I'm still getting follow notifications from them. I can also still see them in my followers list.
**To Reproduce**
1. Block user that currently follows you
2. View own follow list, see that they still follow you
3. Have that user (while blocked) refollow you
4. See notification for that follow
**Expected behavior**
I expect a block to force the blocked user to unfollow me. Even if that didn't happen, though, I'd like to stop receiving notifications about activity from users I've blocked, including follow notifications.
</issue>
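Aside (not part of the original report): the behaviour being asked for is that a block in either direction prevents a follow, and therefore its notification. A framework-free sketch of that rule in plain Python is shown below; the user names and data structure are made up for illustration, and this is not BookWyrm's actual model code.

```python
# Hypothetical sketch of the expected rule: a block in either direction
# prevents a new follow (and so no follow notification is created).
def can_follow(follower, target, blocks):
    """Return True unless either user has blocked the other.

    *blocks* is an iterable of (blocker, blocked) pairs.
    """
    blocked_pairs = set(blocks)
    return (
        (follower, target) not in blocked_pairs
        and (target, follower) not in blocked_pairs
    )


blocks = [("mouse", "cat")]                      # mouse has blocked cat
assert not can_follow("cat", "mouse", blocks)    # blocked -> no follow, no notification
assert not can_follow("mouse", "cat", blocks)    # the block applies in both directions
assert can_follow("cat", "dog", blocks)          # unrelated users are unaffected
```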
<code>
[start of bookwyrm/models/relationship.py]
1 ''' defines relationships between users '''
2 from django.apps import apps
3 from django.db import models, transaction
4 from django.db.models import Q
5 from django.dispatch import receiver
6
7 from bookwyrm import activitypub
8 from .activitypub_mixin import ActivitypubMixin, ActivityMixin
9 from .base_model import BookWyrmModel
10 from . import fields
11
12
13 class UserRelationship(BookWyrmModel):
14 ''' many-to-many through table for followers '''
15 user_subject = fields.ForeignKey(
16 'User',
17 on_delete=models.PROTECT,
18 related_name='%(class)s_user_subject',
19 activitypub_field='actor',
20 )
21 user_object = fields.ForeignKey(
22 'User',
23 on_delete=models.PROTECT,
24 related_name='%(class)s_user_object',
25 activitypub_field='object',
26 )
27
28 @property
29 def privacy(self):
30 ''' all relationships are handled directly with the participants '''
31 return 'direct'
32
33 @property
34 def recipients(self):
35 ''' the remote user needs to recieve direct broadcasts '''
36 return [u for u in [self.user_subject, self.user_object] if not u.local]
37
38 class Meta:
39 ''' relationships should be unique '''
40 abstract = True
41 constraints = [
42 models.UniqueConstraint(
43 fields=['user_subject', 'user_object'],
44 name='%(class)s_unique'
45 ),
46 models.CheckConstraint(
47 check=~models.Q(user_subject=models.F('user_object')),
48 name='%(class)s_no_self'
49 )
50 ]
51
52 def get_remote_id(self, status=None):# pylint: disable=arguments-differ
53 ''' use shelf identifier in remote_id '''
54 status = status or 'follows'
55 base_path = self.user_subject.remote_id
56 return '%s#%s/%d' % (base_path, status, self.id)
57
58
59 class UserFollows(ActivitypubMixin, UserRelationship):
60 ''' Following a user '''
61 status = 'follows'
62 activity_serializer = activitypub.Follow
63
64
65 @classmethod
66 def from_request(cls, follow_request):
67 ''' converts a follow request into a follow relationship '''
68 return cls.objects.create(
69 user_subject=follow_request.user_subject,
70 user_object=follow_request.user_object,
71 remote_id=follow_request.remote_id,
72 )
73
74
75 class UserFollowRequest(ActivitypubMixin, UserRelationship):
76 ''' following a user requires manual or automatic confirmation '''
77 status = 'follow_request'
78 activity_serializer = activitypub.Follow
79
80 def save(self, *args, broadcast=True, **kwargs):
81 ''' make sure the follow or block relationship doesn't already exist '''
82 try:
83 UserFollows.objects.get(
84 user_subject=self.user_subject,
85 user_object=self.user_object
86 )
87 UserBlocks.objects.get(
88 user_subject=self.user_subject,
89 user_object=self.user_object
90 )
91 return None
92 except (UserFollows.DoesNotExist, UserBlocks.DoesNotExist):
93 super().save(*args, **kwargs)
94
95 if broadcast and self.user_subject.local and not self.user_object.local:
96 self.broadcast(self.to_activity(), self.user_subject)
97
98 if self.user_object.local:
99 model = apps.get_model('bookwyrm.Notification', require_ready=True)
100 notification_type = 'FOLLOW_REQUEST' \
101 if self.user_object.manually_approves_followers else 'FOLLOW'
102 model.objects.create(
103 user=self.user_object,
104 related_user=self.user_subject,
105 notification_type=notification_type,
106 )
107
108
109 def accept(self):
110 ''' turn this request into the real deal'''
111 user = self.user_object
112 activity = activitypub.Accept(
113 id=self.get_remote_id(status='accepts'),
114 actor=self.user_object.remote_id,
115 object=self.to_activity()
116 ).serialize()
117 with transaction.atomic():
118 UserFollows.from_request(self)
119 self.delete()
120
121 self.broadcast(activity, user)
122
123
124 def reject(self):
125 ''' generate a Reject for this follow request '''
126 user = self.user_object
127 activity = activitypub.Reject(
128 id=self.get_remote_id(status='rejects'),
129 actor=self.user_object.remote_id,
130 object=self.to_activity()
131 ).serialize()
132 self.delete()
133 self.broadcast(activity, user)
134
135
136 class UserBlocks(ActivityMixin, UserRelationship):
137 ''' prevent another user from following you and seeing your posts '''
138 status = 'blocks'
139 activity_serializer = activitypub.Block
140
141
142 @receiver(models.signals.post_save, sender=UserBlocks)
143 #pylint: disable=unused-argument
144 def execute_after_save(sender, instance, created, *args, **kwargs):
145 ''' remove follow or follow request rels after a block is created '''
146 UserFollows.objects.filter(
147 Q(user_subject=instance.user_subject,
148 user_object=instance.user_object) | \
149 Q(user_subject=instance.user_object,
150 user_object=instance.user_subject)
151 ).delete()
152 UserFollowRequest.objects.filter(
153 Q(user_subject=instance.user_subject,
154 user_object=instance.user_object) | \
155 Q(user_subject=instance.user_object,
156 user_object=instance.user_subject)
157 ).delete()
158
[end of bookwyrm/models/relationship.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/bookwyrm/models/relationship.py b/bookwyrm/models/relationship.py
--- a/bookwyrm/models/relationship.py
+++ b/bookwyrm/models/relationship.py
@@ -82,11 +82,16 @@
try:
UserFollows.objects.get(
user_subject=self.user_subject,
- user_object=self.user_object
+ user_object=self.user_object,
)
+ # blocking in either direction is a no-go
UserBlocks.objects.get(
user_subject=self.user_subject,
- user_object=self.user_object
+ user_object=self.user_object,
+ )
+ UserBlocks.objects.get(
+ user_subject=self.user_object,
+ user_object=self.user_subject,
)
return None
except (UserFollows.DoesNotExist, UserBlocks.DoesNotExist):
| {"golden_diff": "diff --git a/bookwyrm/models/relationship.py b/bookwyrm/models/relationship.py\n--- a/bookwyrm/models/relationship.py\n+++ b/bookwyrm/models/relationship.py\n@@ -82,11 +82,16 @@\n try:\n UserFollows.objects.get(\n user_subject=self.user_subject,\n- user_object=self.user_object\n+ user_object=self.user_object,\n )\n+ # blocking in either direction is a no-go\n UserBlocks.objects.get(\n user_subject=self.user_subject,\n- user_object=self.user_object\n+ user_object=self.user_object,\n+ )\n+ UserBlocks.objects.get(\n+ user_subject=self.user_object,\n+ user_object=self.user_subject,\n )\n return None\n except (UserFollows.DoesNotExist, UserBlocks.DoesNotExist):\n", "issue": "Still get notifications for follows from blocked user\n**Describe the bug**\r\nI blocked https://bookwyrm.social/user/[email protected], but I'm still getting follow notifications from them. I also can still see them in my followers list\r\n\r\n**To Reproduce**\r\n1. Block user that currently follows you\r\n2. View own follow list, see that they still follow you\r\n3. Have that user (while blocked) refollow you\r\n4. See notification for that follow\r\n\r\n**Expected behavior**\r\nI expect that a block would force a user to unfollow me. Even if this didn't happen, though, I'd like to no longer receive notifications from activity of users that I've blocked, including follow notifications\n", "before_files": [{"content": "''' defines relationships between users '''\nfrom django.apps import apps\nfrom django.db import models, transaction\nfrom django.db.models import Q\nfrom django.dispatch import receiver\n\nfrom bookwyrm import activitypub\nfrom .activitypub_mixin import ActivitypubMixin, ActivityMixin\nfrom .base_model import BookWyrmModel\nfrom . import fields\n\n\nclass UserRelationship(BookWyrmModel):\n ''' many-to-many through table for followers '''\n user_subject = fields.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_subject',\n activitypub_field='actor',\n )\n user_object = fields.ForeignKey(\n 'User',\n on_delete=models.PROTECT,\n related_name='%(class)s_user_object',\n activitypub_field='object',\n )\n\n @property\n def privacy(self):\n ''' all relationships are handled directly with the participants '''\n return 'direct'\n\n @property\n def recipients(self):\n ''' the remote user needs to recieve direct broadcasts '''\n return [u for u in [self.user_subject, self.user_object] if not u.local]\n\n class Meta:\n ''' relationships should be unique '''\n abstract = True\n constraints = [\n models.UniqueConstraint(\n fields=['user_subject', 'user_object'],\n name='%(class)s_unique'\n ),\n models.CheckConstraint(\n check=~models.Q(user_subject=models.F('user_object')),\n name='%(class)s_no_self'\n )\n ]\n\n def get_remote_id(self, status=None):# pylint: disable=arguments-differ\n ''' use shelf identifier in remote_id '''\n status = status or 'follows'\n base_path = self.user_subject.remote_id\n return '%s#%s/%d' % (base_path, status, self.id)\n\n\nclass UserFollows(ActivitypubMixin, UserRelationship):\n ''' Following a user '''\n status = 'follows'\n activity_serializer = activitypub.Follow\n\n\n @classmethod\n def from_request(cls, follow_request):\n ''' converts a follow request into a follow relationship '''\n return cls.objects.create(\n user_subject=follow_request.user_subject,\n user_object=follow_request.user_object,\n remote_id=follow_request.remote_id,\n )\n\n\nclass UserFollowRequest(ActivitypubMixin, UserRelationship):\n ''' following a user requires manual 
or automatic confirmation '''\n status = 'follow_request'\n activity_serializer = activitypub.Follow\n\n def save(self, *args, broadcast=True, **kwargs):\n ''' make sure the follow or block relationship doesn't already exist '''\n try:\n UserFollows.objects.get(\n user_subject=self.user_subject,\n user_object=self.user_object\n )\n UserBlocks.objects.get(\n user_subject=self.user_subject,\n user_object=self.user_object\n )\n return None\n except (UserFollows.DoesNotExist, UserBlocks.DoesNotExist):\n super().save(*args, **kwargs)\n\n if broadcast and self.user_subject.local and not self.user_object.local:\n self.broadcast(self.to_activity(), self.user_subject)\n\n if self.user_object.local:\n model = apps.get_model('bookwyrm.Notification', require_ready=True)\n notification_type = 'FOLLOW_REQUEST' \\\n if self.user_object.manually_approves_followers else 'FOLLOW'\n model.objects.create(\n user=self.user_object,\n related_user=self.user_subject,\n notification_type=notification_type,\n )\n\n\n def accept(self):\n ''' turn this request into the real deal'''\n user = self.user_object\n activity = activitypub.Accept(\n id=self.get_remote_id(status='accepts'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n with transaction.atomic():\n UserFollows.from_request(self)\n self.delete()\n\n self.broadcast(activity, user)\n\n\n def reject(self):\n ''' generate a Reject for this follow request '''\n user = self.user_object\n activity = activitypub.Reject(\n id=self.get_remote_id(status='rejects'),\n actor=self.user_object.remote_id,\n object=self.to_activity()\n ).serialize()\n self.delete()\n self.broadcast(activity, user)\n\n\nclass UserBlocks(ActivityMixin, UserRelationship):\n ''' prevent another user from following you and seeing your posts '''\n status = 'blocks'\n activity_serializer = activitypub.Block\n\n\n@receiver(models.signals.post_save, sender=UserBlocks)\n#pylint: disable=unused-argument\ndef execute_after_save(sender, instance, created, *args, **kwargs):\n ''' remove follow or follow request rels after a block is created '''\n UserFollows.objects.filter(\n Q(user_subject=instance.user_subject,\n user_object=instance.user_object) | \\\n Q(user_subject=instance.user_object,\n user_object=instance.user_subject)\n ).delete()\n UserFollowRequest.objects.filter(\n Q(user_subject=instance.user_subject,\n user_object=instance.user_object) | \\\n Q(user_subject=instance.user_object,\n user_object=instance.user_subject)\n ).delete()\n", "path": "bookwyrm/models/relationship.py"}]} | 2,120 | 174 |
gh_patches_debug_14520 | rasdani/github-patches | git_diff | shuup__shuup-1677 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Make sure all Dashboard blocks are filtering by the current shop
One case is here:
- https://github.com/shuup/shuup/blob/master/shuup/admin/modules/sales_dashboard/dashboard.py#L278
- https://github.com/shuup/shuup/blob/master/shuup/admin/modules/sales_dashboard/dashboard.py#L277
</issue>
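Aside (not part of the original issue): the fix being asked for is that every dashboard aggregate is computed only from the current shop's orders, i.e. the equivalent of adding `.filter(shop=shop)` before aggregating. A framework-free sketch of that scoping rule follows; the data, shop names, and function name are invented for illustration and are not Shuup's actual code.

```python
# Hypothetical sketch: dashboard totals scoped to a single shop.
orders = [
    {"shop": "shop-a", "total": 10},
    {"shop": "shop-a", "total": 25},
    {"shop": "shop-b", "total": 99},
]


def lifetime_sales(orders, shop):
    """Sum order totals for *shop* only (the equivalent of .filter(shop=shop))."""
    scoped = [o for o in orders if o["shop"] == shop]
    return {"num_orders": len(scoped), "sales": sum(o["total"] for o in scoped)}


assert lifetime_sales(orders, "shop-a") == {"num_orders": 2, "sales": 35}
assert lifetime_sales(orders, "shop-b") == {"num_orders": 1, "sales": 99}
```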
<code>
[start of shuup/admin/modules/sales_dashboard/dashboard.py]
1 # -*- coding: utf-8 -*-
2 # This file is part of Shuup.
3 #
4 # Copyright (c) 2012-2018, Shuup Inc. All rights reserved.
5 #
6 # This source code is licensed under the OSL-3.0 license found in the
7 # LICENSE file in the root directory of this source tree.
8
9 from __future__ import unicode_literals
10
11 from collections import OrderedDict
12 from datetime import date, time, timedelta
13 from decimal import Decimal
14
15 import six
16 from babel.dates import format_date
17 from dateutil import rrule
18 from django.db.models import Avg, Count, Sum
19 from django.utils.translation import ugettext_lazy as _
20
21 from shuup.admin.dashboard import (
22 ChartDataType, ChartType, DashboardChartBlock, DashboardContentBlock,
23 DashboardMoneyBlock, MixedChart
24 )
25 from shuup.core.models import Order, Shop
26 from shuup.core.pricing import TaxfulPrice
27 from shuup.core.utils.query import group_by_period
28 from shuup.utils.dates import get_year_and_month_format, local_now, to_aware
29 from shuup.utils.i18n import get_current_babel_locale
30
31
32 def get_orders_for_shop(request, currency=None):
33 shop = request.shop
34 if not currency:
35 currency = shop.currency
36 return Order.objects.filter(shop=shop, currency=currency)
37
38
39 def get_orders_by_currency(currency):
40 return Order.objects.filter(currency=currency)
41
42
43 def month_iter(start_date, end_date):
44 return ((d.month, d.year) for d in rrule.rrule(rrule.MONTHLY, dtstart=start_date, until=end_date))
45
46
47 class OrderValueChartDashboardBlock(DashboardChartBlock):
48 default_size = "small"
49
50 def __init__(self, id, request, **kwargs):
51 shop = request.shop
52 self.cached_chart = None
53 self.request = request
54 self.currency = shop.currency
55 super(OrderValueChartDashboardBlock, self).__init__(id, **kwargs)
56 self.sort_order = 2
57
58 @property
59 def size(self):
60 data_size = 0
61 for dataset in self.get_chart().datasets:
62 data_size = max(data_size, len(dataset["data"]))
63 # the size will be dynamic. small for periods up to 4 months, otherwise medium
64 return ("medium" if data_size > 4 else "small")
65
66 @size.setter
67 def size(self, value):
68 # do not raise!
69 pass
70
71 def get_chart(self):
72 if self.cached_chart is not None:
73 return self.cached_chart
74
75 chart_options = {
76 "scales": {
77 "yAxes": [{
78 "ticks": {
79 "beginAtZero": True
80 }
81 }]
82 }
83 }
84
85 today = date.today()
86 chart_start_date = today - timedelta(days=365)
87
88 orders = get_orders_for_shop(self.request)
89 sum_sales_data = group_by_period(
90 orders.valid().since((today - chart_start_date).days),
91 "order_date",
92 "month",
93 sum=Sum("taxful_total_price_value")
94 )
95
96 for (month, year) in month_iter(chart_start_date, today):
97 sales_date = date(year, month, 1)
98 if sales_date not in sum_sales_data:
99 sum_sales_data[sales_date] = {"sum": Decimal(0)}
100
101 # sort and recreated the ordered dict since we've put new items into
102 sum_sales_data = OrderedDict(sorted(six.iteritems(sum_sales_data), key=lambda x: x[0]))
103
104 locale = get_current_babel_locale()
105 labels = [
106 format_date(k, format=get_year_and_month_format(locale), locale=locale)
107 for k in sum_sales_data
108 ]
109 mixed_chart = MixedChart(title=_("Sales per Month (past 12 months)"),
110 labels=labels,
111 data_type=ChartDataType.CURRENCY,
112 options=chart_options,
113 currency=self.currency,
114 locale=locale)
115
116 cumulative_sales = []
117 average_sales = []
118
119 # only calculate cumulative and average if there are at least 3 months
120 if len(sum_sales_data) >= 3:
121 count = 0
122 total = Decimal()
123
124 for month_sale in sum_sales_data.values():
125 total = total + month_sale["sum"]
126 cumulative_sales.append(total)
127 average_sales.append(total / (count+1))
128 count = count + 1
129
130 # this will be on top of all bars
131 if average_sales:
132 mixed_chart.add_data(_("Average Sales"), [v for v in average_sales], ChartType.LINE)
133
134 # this will be under the cummulative bars
135 mixed_chart.add_data(_("Sales"), [v["sum"] for v in sum_sales_data.values()], ChartType.BAR)
136
137 # this will be under all others charts
138 if cumulative_sales:
139 mixed_chart.add_data(_("Cumulative Total Sales"), [v for v in cumulative_sales], ChartType.BAR)
140
141 self.cached_chart = mixed_chart
142 return mixed_chart
143
144
145 def get_subtitle(count):
146 return _("Based on %d orders") % count
147
148
149 def get_sales_of_the_day_block(request, currency=None):
150 orders = get_orders_for_shop(request)
151 if not currency:
152 shop = request.shop
153 currency = shop.currency
154
155 # Sales of the day
156 todays_order_data = (
157 orders.complete().since(0)
158 .aggregate(count=Count("id"), sum=Sum("taxful_total_price_value")))
159 return DashboardMoneyBlock(
160 id="todays_order_sum",
161 color="green",
162 title=_("Today's Sales"),
163 value=(todays_order_data.get("sum") or 0),
164 currency=currency,
165 icon="fa fa-calculator",
166 subtitle=get_subtitle(todays_order_data.get("count"))
167 )
168
169
170 def get_lifetime_sales_block(request, currency=None):
171 orders = get_orders_for_shop(request)
172 if not currency:
173 shop = request.shop
174 currency = shop.currency
175
176 # Lifetime sales
177 lifetime_sales_data = orders.complete().aggregate(
178 count=Count("id"),
179 sum=Sum("taxful_total_price_value")
180 )
181
182 return DashboardMoneyBlock(
183 id="lifetime_sales_sum",
184 color="green",
185 title=_("Lifetime Sales"),
186 value=(lifetime_sales_data.get("sum") or 0),
187 currency=currency,
188 icon="fa fa-line-chart",
189 subtitle=get_subtitle(lifetime_sales_data.get("count")),
190 sort_order=1
191 )
192
193
194 def get_avg_purchase_size_block(request, currency=None):
195 orders = get_orders_for_shop(request)
196 shop = request.shop
197 if not currency:
198 currency = shop.currency
199
200 lifetime_sales_data = orders.complete().aggregate(
201 count=Count("id"),
202 sum=Sum("taxful_total_price_value")
203 )
204
205 # Average size of purchase with amount of orders it is calculated from
206 average_purchase_size = (
207 Order.objects.filter(shop=shop)
208 .aggregate(count=Count("id"), sum=Avg("taxful_total_price_value")))
209 return DashboardMoneyBlock(
210 id="average_purchase_sum",
211 color="blue",
212 title=_("Average Purchase"),
213 value=(average_purchase_size.get("sum") or 0),
214 currency=currency,
215 icon="fa fa-shopping-cart",
216 subtitle=get_subtitle(lifetime_sales_data.get("count")),
217 sort_order=1
218 )
219
220
221 def get_open_orders_block(request, currency=None):
222 orders = get_orders_for_shop(request)
223 if not currency:
224 shop = request.shop
225 currency = shop.currency
226
227 # Open orders / open orders value
228 open_order_data = (
229 orders.incomplete()
230 .aggregate(count=Count("id"), sum=Sum("taxful_total_price_value")))
231
232 return DashboardMoneyBlock(
233 id="open_orders_sum",
234 color="orange",
235 title=_("Open Orders Value"),
236 value=TaxfulPrice((open_order_data.get("sum") or 0), currency),
237 currency=currency,
238 icon="fa fa-inbox",
239 subtitle=get_subtitle(open_order_data.get("count")),
240 sort_order=1
241 )
242
243
244 def get_order_value_chart_dashboard_block(request, currency):
245 return OrderValueChartDashboardBlock(id="order_value_chart", request=request)
246
247
248 def get_order_overview_for_date_range(currency, start_date, end_date, shop=None):
249 if not shop:
250 shop = Shop.objects.first()
251 orders = get_orders_by_currency(currency).filter(shop=shop).complete()
252
253 orders_in_range = orders.in_date_range(start_date, end_date)
254 q = orders_in_range.aggregate(
255 num_orders=Count("id"),
256 num_customers=Count("customer", distinct=True),
257 sales=Sum("taxful_total_price_value"))
258 anon_orders = orders_in_range.filter(customer__isnull=True).aggregate(
259 num_orders=Count("id"))
260 q["num_customers"] += anon_orders["num_orders"]
261 q["sales"] = TaxfulPrice(q["sales"] or 0, currency)
262 return q
263
264
265 def get_shop_overview_block(request, currency, for_date=None):
266 end = to_aware(for_date, time=time.max) if for_date else local_now()
267 start_of_day = to_aware(end.date(), time=time.min)
268 start_of_month = start_of_day.replace(day=1)
269 start_of_year = start_of_day.replace(month=1, day=1)
270 shop = request.shop
271
272 if not currency:
273 currency = shop.currency
274
275 daily = get_order_overview_for_date_range(currency, start_of_day, end, shop=shop)
276 mtd = get_order_overview_for_date_range(currency, start_of_month, end, shop=shop)
277 ytd = get_order_overview_for_date_range(currency, start_of_year, end)
278 totals = get_orders_by_currency(currency).complete().aggregate(
279 num_orders=Count("id"),
280 num_customers=Count("customer", distinct=True),
281 sales=Sum("taxful_total_price_value")
282 )
283 anon_orders = get_orders_by_currency(currency).complete().filter(customer__isnull=True, shop=shop).aggregate(
284 num_orders=Count("id"))
285 totals["num_customers"] += anon_orders["num_orders"]
286 totals["sales"] = TaxfulPrice(totals["sales"] or 0, currency)
287 block = DashboardContentBlock.by_rendering_template(
288 "store_overview", request, "shuup/admin/sales_dashboard/_store_overview_dashboard_block.jinja", {
289 "daily": daily,
290 "mtd": mtd,
291 "ytd": ytd,
292 "totals": totals
293 })
294 block.size = "medium"
295 block.sort_order = 0
296 return block
297
298
299 def get_recent_orders_block(request, currency=None):
300 orders = get_orders_for_shop(request).valid().order_by("-order_date")[:5]
301
302 block = DashboardContentBlock.by_rendering_template(
303 "recent_orders", request, "shuup/admin/sales_dashboard/_recent_orders_dashboard_block.jinja", {
304 "orders": orders
305 }
306 )
307 block.size = "medium"
308 block.sort_order = 2
309 return block
310
[end of shuup/admin/modules/sales_dashboard/dashboard.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/shuup/admin/modules/sales_dashboard/dashboard.py b/shuup/admin/modules/sales_dashboard/dashboard.py
--- a/shuup/admin/modules/sales_dashboard/dashboard.py
+++ b/shuup/admin/modules/sales_dashboard/dashboard.py
@@ -274,8 +274,8 @@
daily = get_order_overview_for_date_range(currency, start_of_day, end, shop=shop)
mtd = get_order_overview_for_date_range(currency, start_of_month, end, shop=shop)
- ytd = get_order_overview_for_date_range(currency, start_of_year, end)
- totals = get_orders_by_currency(currency).complete().aggregate(
+ ytd = get_order_overview_for_date_range(currency, start_of_year, end, shop=shop)
+ totals = get_orders_by_currency(currency).complete().filter(shop=shop).aggregate(
num_orders=Count("id"),
num_customers=Count("customer", distinct=True),
sales=Sum("taxful_total_price_value")
| {"golden_diff": "diff --git a/shuup/admin/modules/sales_dashboard/dashboard.py b/shuup/admin/modules/sales_dashboard/dashboard.py\n--- a/shuup/admin/modules/sales_dashboard/dashboard.py\n+++ b/shuup/admin/modules/sales_dashboard/dashboard.py\n@@ -274,8 +274,8 @@\n \n daily = get_order_overview_for_date_range(currency, start_of_day, end, shop=shop)\n mtd = get_order_overview_for_date_range(currency, start_of_month, end, shop=shop)\n- ytd = get_order_overview_for_date_range(currency, start_of_year, end)\n- totals = get_orders_by_currency(currency).complete().aggregate(\n+ ytd = get_order_overview_for_date_range(currency, start_of_year, end, shop=shop)\n+ totals = get_orders_by_currency(currency).complete().filter(shop=shop).aggregate(\n num_orders=Count(\"id\"),\n num_customers=Count(\"customer\", distinct=True),\n sales=Sum(\"taxful_total_price_value\")\n", "issue": "Make sure all Dashboard blocks are filtering by the current shop\nOne case is here:\r\n- https://github.com/shuup/shuup/blob/master/shuup/admin/modules/sales_dashboard/dashboard.py#L278\r\n- https://github.com/shuup/shuup/blob/master/shuup/admin/modules/sales_dashboard/dashboard.py#L277\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of Shuup.\n#\n# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.\n#\n# This source code is licensed under the OSL-3.0 license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import unicode_literals\n\nfrom collections import OrderedDict\nfrom datetime import date, time, timedelta\nfrom decimal import Decimal\n\nimport six\nfrom babel.dates import format_date\nfrom dateutil import rrule\nfrom django.db.models import Avg, Count, Sum\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom shuup.admin.dashboard import (\n ChartDataType, ChartType, DashboardChartBlock, DashboardContentBlock,\n DashboardMoneyBlock, MixedChart\n)\nfrom shuup.core.models import Order, Shop\nfrom shuup.core.pricing import TaxfulPrice\nfrom shuup.core.utils.query import group_by_period\nfrom shuup.utils.dates import get_year_and_month_format, local_now, to_aware\nfrom shuup.utils.i18n import get_current_babel_locale\n\n\ndef get_orders_for_shop(request, currency=None):\n shop = request.shop\n if not currency:\n currency = shop.currency\n return Order.objects.filter(shop=shop, currency=currency)\n\n\ndef get_orders_by_currency(currency):\n return Order.objects.filter(currency=currency)\n\n\ndef month_iter(start_date, end_date):\n return ((d.month, d.year) for d in rrule.rrule(rrule.MONTHLY, dtstart=start_date, until=end_date))\n\n\nclass OrderValueChartDashboardBlock(DashboardChartBlock):\n default_size = \"small\"\n\n def __init__(self, id, request, **kwargs):\n shop = request.shop\n self.cached_chart = None\n self.request = request\n self.currency = shop.currency\n super(OrderValueChartDashboardBlock, self).__init__(id, **kwargs)\n self.sort_order = 2\n\n @property\n def size(self):\n data_size = 0\n for dataset in self.get_chart().datasets:\n data_size = max(data_size, len(dataset[\"data\"]))\n # the size will be dynamic. 
small for periods up to 4 months, otherwise medium\n return (\"medium\" if data_size > 4 else \"small\")\n\n @size.setter\n def size(self, value):\n # do not raise!\n pass\n\n def get_chart(self):\n if self.cached_chart is not None:\n return self.cached_chart\n\n chart_options = {\n \"scales\": {\n \"yAxes\": [{\n \"ticks\": {\n \"beginAtZero\": True\n }\n }]\n }\n }\n\n today = date.today()\n chart_start_date = today - timedelta(days=365)\n\n orders = get_orders_for_shop(self.request)\n sum_sales_data = group_by_period(\n orders.valid().since((today - chart_start_date).days),\n \"order_date\",\n \"month\",\n sum=Sum(\"taxful_total_price_value\")\n )\n\n for (month, year) in month_iter(chart_start_date, today):\n sales_date = date(year, month, 1)\n if sales_date not in sum_sales_data:\n sum_sales_data[sales_date] = {\"sum\": Decimal(0)}\n\n # sort and recreated the ordered dict since we've put new items into\n sum_sales_data = OrderedDict(sorted(six.iteritems(sum_sales_data), key=lambda x: x[0]))\n\n locale = get_current_babel_locale()\n labels = [\n format_date(k, format=get_year_and_month_format(locale), locale=locale)\n for k in sum_sales_data\n ]\n mixed_chart = MixedChart(title=_(\"Sales per Month (past 12 months)\"),\n labels=labels,\n data_type=ChartDataType.CURRENCY,\n options=chart_options,\n currency=self.currency,\n locale=locale)\n\n cumulative_sales = []\n average_sales = []\n\n # only calculate cumulative and average if there are at least 3 months\n if len(sum_sales_data) >= 3:\n count = 0\n total = Decimal()\n\n for month_sale in sum_sales_data.values():\n total = total + month_sale[\"sum\"]\n cumulative_sales.append(total)\n average_sales.append(total / (count+1))\n count = count + 1\n\n # this will be on top of all bars\n if average_sales:\n mixed_chart.add_data(_(\"Average Sales\"), [v for v in average_sales], ChartType.LINE)\n\n # this will be under the cummulative bars\n mixed_chart.add_data(_(\"Sales\"), [v[\"sum\"] for v in sum_sales_data.values()], ChartType.BAR)\n\n # this will be under all others charts\n if cumulative_sales:\n mixed_chart.add_data(_(\"Cumulative Total Sales\"), [v for v in cumulative_sales], ChartType.BAR)\n\n self.cached_chart = mixed_chart\n return mixed_chart\n\n\ndef get_subtitle(count):\n return _(\"Based on %d orders\") % count\n\n\ndef get_sales_of_the_day_block(request, currency=None):\n orders = get_orders_for_shop(request)\n if not currency:\n shop = request.shop\n currency = shop.currency\n\n # Sales of the day\n todays_order_data = (\n orders.complete().since(0)\n .aggregate(count=Count(\"id\"), sum=Sum(\"taxful_total_price_value\")))\n return DashboardMoneyBlock(\n id=\"todays_order_sum\",\n color=\"green\",\n title=_(\"Today's Sales\"),\n value=(todays_order_data.get(\"sum\") or 0),\n currency=currency,\n icon=\"fa fa-calculator\",\n subtitle=get_subtitle(todays_order_data.get(\"count\"))\n )\n\n\ndef get_lifetime_sales_block(request, currency=None):\n orders = get_orders_for_shop(request)\n if not currency:\n shop = request.shop\n currency = shop.currency\n\n # Lifetime sales\n lifetime_sales_data = orders.complete().aggregate(\n count=Count(\"id\"),\n sum=Sum(\"taxful_total_price_value\")\n )\n\n return DashboardMoneyBlock(\n id=\"lifetime_sales_sum\",\n color=\"green\",\n title=_(\"Lifetime Sales\"),\n value=(lifetime_sales_data.get(\"sum\") or 0),\n currency=currency,\n icon=\"fa fa-line-chart\",\n subtitle=get_subtitle(lifetime_sales_data.get(\"count\")),\n sort_order=1\n )\n\n\ndef get_avg_purchase_size_block(request, 
currency=None):\n orders = get_orders_for_shop(request)\n shop = request.shop\n if not currency:\n currency = shop.currency\n\n lifetime_sales_data = orders.complete().aggregate(\n count=Count(\"id\"),\n sum=Sum(\"taxful_total_price_value\")\n )\n\n # Average size of purchase with amount of orders it is calculated from\n average_purchase_size = (\n Order.objects.filter(shop=shop)\n .aggregate(count=Count(\"id\"), sum=Avg(\"taxful_total_price_value\")))\n return DashboardMoneyBlock(\n id=\"average_purchase_sum\",\n color=\"blue\",\n title=_(\"Average Purchase\"),\n value=(average_purchase_size.get(\"sum\") or 0),\n currency=currency,\n icon=\"fa fa-shopping-cart\",\n subtitle=get_subtitle(lifetime_sales_data.get(\"count\")),\n sort_order=1\n )\n\n\ndef get_open_orders_block(request, currency=None):\n orders = get_orders_for_shop(request)\n if not currency:\n shop = request.shop\n currency = shop.currency\n\n # Open orders / open orders value\n open_order_data = (\n orders.incomplete()\n .aggregate(count=Count(\"id\"), sum=Sum(\"taxful_total_price_value\")))\n\n return DashboardMoneyBlock(\n id=\"open_orders_sum\",\n color=\"orange\",\n title=_(\"Open Orders Value\"),\n value=TaxfulPrice((open_order_data.get(\"sum\") or 0), currency),\n currency=currency,\n icon=\"fa fa-inbox\",\n subtitle=get_subtitle(open_order_data.get(\"count\")),\n sort_order=1\n )\n\n\ndef get_order_value_chart_dashboard_block(request, currency):\n return OrderValueChartDashboardBlock(id=\"order_value_chart\", request=request)\n\n\ndef get_order_overview_for_date_range(currency, start_date, end_date, shop=None):\n if not shop:\n shop = Shop.objects.first()\n orders = get_orders_by_currency(currency).filter(shop=shop).complete()\n\n orders_in_range = orders.in_date_range(start_date, end_date)\n q = orders_in_range.aggregate(\n num_orders=Count(\"id\"),\n num_customers=Count(\"customer\", distinct=True),\n sales=Sum(\"taxful_total_price_value\"))\n anon_orders = orders_in_range.filter(customer__isnull=True).aggregate(\n num_orders=Count(\"id\"))\n q[\"num_customers\"] += anon_orders[\"num_orders\"]\n q[\"sales\"] = TaxfulPrice(q[\"sales\"] or 0, currency)\n return q\n\n\ndef get_shop_overview_block(request, currency, for_date=None):\n end = to_aware(for_date, time=time.max) if for_date else local_now()\n start_of_day = to_aware(end.date(), time=time.min)\n start_of_month = start_of_day.replace(day=1)\n start_of_year = start_of_day.replace(month=1, day=1)\n shop = request.shop\n\n if not currency:\n currency = shop.currency\n\n daily = get_order_overview_for_date_range(currency, start_of_day, end, shop=shop)\n mtd = get_order_overview_for_date_range(currency, start_of_month, end, shop=shop)\n ytd = get_order_overview_for_date_range(currency, start_of_year, end)\n totals = get_orders_by_currency(currency).complete().aggregate(\n num_orders=Count(\"id\"),\n num_customers=Count(\"customer\", distinct=True),\n sales=Sum(\"taxful_total_price_value\")\n )\n anon_orders = get_orders_by_currency(currency).complete().filter(customer__isnull=True, shop=shop).aggregate(\n num_orders=Count(\"id\"))\n totals[\"num_customers\"] += anon_orders[\"num_orders\"]\n totals[\"sales\"] = TaxfulPrice(totals[\"sales\"] or 0, currency)\n block = DashboardContentBlock.by_rendering_template(\n \"store_overview\", request, \"shuup/admin/sales_dashboard/_store_overview_dashboard_block.jinja\", {\n \"daily\": daily,\n \"mtd\": mtd,\n \"ytd\": ytd,\n \"totals\": totals\n })\n block.size = \"medium\"\n block.sort_order = 0\n return block\n\n\ndef 
get_recent_orders_block(request, currency=None):\n orders = get_orders_for_shop(request).valid().order_by(\"-order_date\")[:5]\n\n block = DashboardContentBlock.by_rendering_template(\n \"recent_orders\", request, \"shuup/admin/sales_dashboard/_recent_orders_dashboard_block.jinja\", {\n \"orders\": orders\n }\n )\n block.size = \"medium\"\n block.sort_order = 2\n return block\n", "path": "shuup/admin/modules/sales_dashboard/dashboard.py"}]} | 3,850 | 221 |
gh_patches_debug_60893 | rasdani/github-patches | git_diff | webkom__lego-2342 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Phone number not saved from registration form
When creating a new user, LEGO ignores the phone number inserted into the registration form.
</issue>
<code>
[start of lego/apps/users/serializers/registration.py]
1 from django.contrib.auth import password_validation
2 from rest_framework import exceptions, serializers
3
4 from lego.apps.users.models import User
5 from lego.utils.functions import verify_captcha
6
7
8 class RegistrationSerializer(serializers.ModelSerializer):
9 captcha_response = serializers.CharField(required=True)
10
11 def validate_captcha_response(self, captcha_response):
12 if not verify_captcha(captcha_response):
13 raise exceptions.ValidationError("invalid_captcha")
14 return captcha_response
15
16 class Meta:
17 model = User
18 fields = ("email", "captcha_response")
19
20
21 class RegistrationConfirmationSerializer(serializers.ModelSerializer):
22
23 password = serializers.CharField(required=True, write_only=True)
24
25 def validate_username(self, username):
26 username_exists = User.objects.filter(username__iexact=username).exists()
27 if username_exists:
28 raise exceptions.ValidationError("Username exists")
29 return username
30
31 def validate_password(self, password):
32 password_validation.validate_password(password)
33 return password
34
35 class Meta:
36 model = User
37 fields = (
38 "username",
39 "first_name",
40 "last_name",
41 "gender",
42 "password",
43 "allergies",
44 )
45
[end of lego/apps/users/serializers/registration.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lego/apps/users/serializers/registration.py b/lego/apps/users/serializers/registration.py
--- a/lego/apps/users/serializers/registration.py
+++ b/lego/apps/users/serializers/registration.py
@@ -41,4 +41,5 @@
"gender",
"password",
"allergies",
+ "phone_number",
)
| {"golden_diff": "diff --git a/lego/apps/users/serializers/registration.py b/lego/apps/users/serializers/registration.py\n--- a/lego/apps/users/serializers/registration.py\n+++ b/lego/apps/users/serializers/registration.py\n@@ -41,4 +41,5 @@\n \"gender\",\n \"password\",\n \"allergies\",\n+ \"phone_number\",\n )\n", "issue": "Phone number not saved from registration form\nWhen creating a new user, LEGO ignores the phone number inserted into the registration form.\n", "before_files": [{"content": "from django.contrib.auth import password_validation\nfrom rest_framework import exceptions, serializers\n\nfrom lego.apps.users.models import User\nfrom lego.utils.functions import verify_captcha\n\n\nclass RegistrationSerializer(serializers.ModelSerializer):\n captcha_response = serializers.CharField(required=True)\n\n def validate_captcha_response(self, captcha_response):\n if not verify_captcha(captcha_response):\n raise exceptions.ValidationError(\"invalid_captcha\")\n return captcha_response\n\n class Meta:\n model = User\n fields = (\"email\", \"captcha_response\")\n\n\nclass RegistrationConfirmationSerializer(serializers.ModelSerializer):\n\n password = serializers.CharField(required=True, write_only=True)\n\n def validate_username(self, username):\n username_exists = User.objects.filter(username__iexact=username).exists()\n if username_exists:\n raise exceptions.ValidationError(\"Username exists\")\n return username\n\n def validate_password(self, password):\n password_validation.validate_password(password)\n return password\n\n class Meta:\n model = User\n fields = (\n \"username\",\n \"first_name\",\n \"last_name\",\n \"gender\",\n \"password\",\n \"allergies\",\n )\n", "path": "lego/apps/users/serializers/registration.py"}]} | 887 | 90 |
gh_patches_debug_5596 | rasdani/github-patches | git_diff | Lightning-AI__torchmetrics-1572 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
0.11.2 and 0.11.3 not released on PyPI
The 0.11.2 and 0.11.3 releases have both been released on GitHub, but haven't yet been released on PyPI. Is this planned for the future, or is the release process broken?
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 import glob
3 import os
4 import re
5 from functools import partial
6 from importlib.util import module_from_spec, spec_from_file_location
7 from itertools import chain
8 from pathlib import Path
9 from typing import Any, Iterable, Iterator, List, Optional, Tuple, Union
10
11 from pkg_resources import Requirement, yield_lines
12 from setuptools import find_packages, setup
13
14 _PATH_ROOT = os.path.realpath(os.path.dirname(__file__))
15 _PATH_SOURCE = os.path.join(_PATH_ROOT, "src")
16 _PATH_REQUIRE = os.path.join(_PATH_ROOT, "requirements")
17 _FREEZE_REQUIREMENTS = bool(int(os.environ.get("FREEZE_REQUIREMENTS", 0)))
18
19
20 class _RequirementWithComment(Requirement):
21 strict_string = "# strict"
22
23 def __init__(self, *args: Any, comment: str = "", pip_argument: Optional[str] = None, **kwargs: Any) -> None:
24 super().__init__(*args, **kwargs)
25 self.comment = comment
26 assert pip_argument is None or pip_argument # sanity check that it's not an empty str
27 self.pip_argument = pip_argument
28 self.strict = self.strict_string in comment.lower()
29
30 def adjust(self, unfreeze: bool) -> str:
31 """Remove version restrictions unless they are strict.
32
33 >>> _RequirementWithComment("arrow<=1.2.2,>=1.2.0", comment="# anything").adjust(False)
34 'arrow<=1.2.2,>=1.2.0'
35 >>> _RequirementWithComment("arrow<=1.2.2,>=1.2.0", comment="# strict").adjust(False)
36 'arrow<=1.2.2,>=1.2.0 # strict'
37 >>> _RequirementWithComment("arrow<=1.2.2,>=1.2.0", comment="# my name").adjust(True)
38 'arrow>=1.2.0'
39 >>> _RequirementWithComment("arrow>=1.2.0, <=1.2.2", comment="# strict").adjust(True)
40 'arrow<=1.2.2,>=1.2.0 # strict'
41 >>> _RequirementWithComment("arrow").adjust(True)
42 'arrow'
43 """
44 out = str(self)
45 if self.strict:
46 return f"{out} {self.strict_string}"
47 if unfreeze:
48 for operator, version in self.specs:
49 if operator in ("<", "<="):
50 # drop upper bound
51 return out.replace(f"{operator}{version},", "")
52 return out
53
54
55 def _parse_requirements(strs: Union[str, Iterable[str]]) -> Iterator[_RequirementWithComment]:
56 r"""Adapted from `pkg_resources.parse_requirements` to include comments.
57
58 >>> txt = ['# ignored', '', 'this # is an', '--piparg', 'example', 'foo # strict', 'thing', '-r different/file.txt']
59 >>> [r.adjust('none') for r in _parse_requirements(txt)]
60 ['this', 'example', 'foo # strict', 'thing']
61 >>> txt = '\\n'.join(txt)
62 >>> [r.adjust('none') for r in _parse_requirements(txt)]
63 ['this', 'example', 'foo # strict', 'thing']
64 """
65 lines = yield_lines(strs)
66 pip_argument = None
67 for line in lines:
68 # Drop comments -- a hash without a space may be in a URL.
69 if " #" in line:
70 comment_pos = line.find(" #")
71 line, comment = line[:comment_pos], line[comment_pos:]
72 else:
73 comment = ""
74 # If there is a line continuation, drop it, and append the next line.
75 if line.endswith("\\"):
76 line = line[:-2].strip()
77 try:
78 line += next(lines)
79 except StopIteration:
80 return
81 # If there's a pip argument, save it
82 if line.startswith("--"):
83 pip_argument = line
84 continue
85 if line.startswith("-r "):
86 # linked requirement files are unsupported
87 continue
88 yield _RequirementWithComment(line, comment=comment, pip_argument=pip_argument)
89 pip_argument = None
90
91
92 def _load_requirements(
93 path_dir: str, file_name: str = "requirements.txt", unfreeze: bool = not _FREEZE_REQUIREMENTS
94 ) -> List[str]:
95 """Load requirements from a file.
96
97 >>> _load_requirements(_PATH_ROOT)
98 ['numpy...', 'torch..."]
99 """
100 path = Path(path_dir) / file_name
101 assert path.exists(), (path_dir, file_name, path)
102 text = path.read_text()
103 return [req.adjust(unfreeze) for req in _parse_requirements(text)]
104
105
106 def _load_readme_description(path_dir: str, homepage: str, version: str) -> str:
107 """Load readme as decribtion.
108
109 >>> _load_readme_description(_PATH_ROOT, "", "")
110 '<div align="center">...'
111 """
112 path_readme = os.path.join(path_dir, "README.md")
113 with open(path_readme, encoding="utf-8") as fp:
114 text = fp.read()
115
116 # https://github.com/Lightning-AI/torchmetrics/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png
117 github_source_url = os.path.join(homepage, "raw", version)
118 # replace relative repository path to absolute link to the release
119 # do not replace all "docs" as in the readme we replace some other sources with particular path to docs
120 text = text.replace("docs/source/_static/", f"{os.path.join(github_source_url, 'docs/source/_static/')}")
121
122 # readthedocs badge
123 text = text.replace("badge/?version=stable", f"badge/?version={version}")
124 text = text.replace("torchmetrics.readthedocs.io/en/stable/", f"torchmetrics.readthedocs.io/en/{version}")
125 # codecov badge
126 text = text.replace("/branch/master/graph/badge.svg", f"/release/{version}/graph/badge.svg")
127 # replace github badges for release ones
128 text = text.replace("badge.svg?branch=master&event=push", f"badge.svg?tag={version}")
129 # Azure...
130 text = text.replace("?branchName=master", f"?branchName=refs%2Ftags%2F{version}")
131 text = re.sub(r"\?definitionId=\d+&branchName=master", f"?definitionId=2&branchName=refs%2Ftags%2F{version}", text)
132
133 skip_begin = r"<!-- following section will be skipped from PyPI description -->"
134 skip_end = r"<!-- end skipping PyPI description -->"
135 # todo: wrap content as commented description
136 return re.sub(rf"{skip_begin}.+?{skip_end}", "<!-- -->", text, flags=re.IGNORECASE + re.DOTALL)
137
138
139 def _load_py_module(fname, pkg="torchmetrics"):
140 spec = spec_from_file_location(os.path.join(pkg, fname), os.path.join(_PATH_SOURCE, pkg, fname))
141 py = module_from_spec(spec)
142 spec.loader.exec_module(py)
143 return py
144
145
146 ABOUT = _load_py_module("__about__.py")
147 LONG_DESCRIPTION = _load_readme_description(
148 _PATH_ROOT,
149 homepage=ABOUT.__homepage__,
150 version=f"v{ABOUT.__version__}",
151 )
152 BASE_REQUIREMENTS = _load_requirements(path_dir=_PATH_ROOT, file_name="requirements.txt")
153
154
155 def _prepare_extras(skip_files: Tuple[str] = ("devel.txt", "doctest.txt", "integrate.txt", "docs.txt")):
156 # find all extra requirements
157 _load_req = partial(_load_requirements, path_dir=_PATH_REQUIRE)
158 found_req_files = sorted(os.path.basename(p) for p in glob.glob(os.path.join(_PATH_REQUIRE, "*.txt")))
159 # filter unwanted files
160 found_req_files = [n for n in found_req_files if n not in skip_files]
161 found_req_names = [os.path.splitext(req)[0] for req in found_req_files]
162 # define basic and extra extras
163 extras_req = {
164 name: _load_req(file_name=fname) for name, fname in zip(found_req_names, found_req_files) if "_test" not in name
165 }
166 for name, fname in zip(found_req_names, found_req_files):
167 if "_test" in name:
168 extras_req["test"] += _load_req(file_name=fname)
169 # filter the uniques
170 extras_req = {n: list(set(req)) for n, req in extras_req.items()}
171 # create an 'all' keyword that install all possible dependencies
172 extras_req["all"] = list(chain([pkgs for k, pkgs in extras_req.items() if k not in ("test", "docs")]))
173 extras_req["dev"] = extras_req["all"] + extras_req["test"]
174 return extras_req
175
176
177 # https://packaging.python.org/discussions/install-requires-vs-requirements /
178 # keep the meta-data here for simplicity in reading this file... it's not obvious
179 # what happens and to non-engineers they won't know to look in init ...
180 # the goal of the project is simplicity for researchers, don't want to add too much
181 # engineer specific practices
182 if __name__ == "__main__":
183 setup(
184 name="torchmetrics",
185 version=ABOUT.__version__,
186 description=ABOUT.__docs__,
187 author=ABOUT.__author__,
188 author_email=ABOUT.__author_email__,
189 url=ABOUT.__homepage__,
190 download_url=os.path.join(ABOUT.__homepage__, "archive", "master.zip"),
191 license=ABOUT.__license__,
192 packages=find_packages(where="src"),
193 package_dir={"": "src"},
194 long_description=LONG_DESCRIPTION,
195 long_description_content_type="text/markdown",
196 include_package_data=True,
197 zip_safe=False,
198 keywords=["deep learning", "machine learning", "pytorch", "metrics", "AI"],
199 python_requires=">=3.7",
200 setup_requires=[],
201 install_requires=BASE_REQUIREMENTS,
202 extras_require=_prepare_extras(),
203 project_urls={
204 "Bug Tracker": os.path.join(ABOUT.__homepage__, "issues"),
205 "Documentation": "https://torchmetrics.rtfd.io/en/latest/",
206 "Source Code": ABOUT.__homepage__,
207 },
208 classifiers=[
209 "Environment :: Console",
210 "Natural Language :: English",
211 # How mature is this project? Common values are
212 # 3 - Alpha, 4 - Beta, 5 - Production/Stable
213 "Development Status :: 5 - Production/Stable",
214 # Indicate who your project is intended for
215 "Intended Audience :: Developers",
216 "Topic :: Scientific/Engineering :: Artificial Intelligence",
217 "Topic :: Scientific/Engineering :: Image Recognition",
218 "Topic :: Scientific/Engineering :: Information Analysis",
219 # Pick your license as you wish
220 "License :: OSI Approved :: Apache Software License",
221 "Operating System :: OS Independent",
222 # Specify the Python versions you support here. In particular, ensure
223 # that you indicate whether you support Python 2, Python 3 or both.
224 "Programming Language :: Python :: 3",
225 "Programming Language :: Python :: 3.7",
226 "Programming Language :: Python :: 3.8",
227 "Programming Language :: Python :: 3.9",
228 "Programming Language :: Python :: 3.10",
229 ],
230 )
231
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -78,6 +78,9 @@
line += next(lines)
except StopIteration:
return
+ if "@" in line or re.search("https?://", line):
+ # skip lines with links like `pesq @ git+https://github.com/ludlows/python-pesq`
+ continue
# If there's a pip argument, save it
if line.startswith("--"):
pip_argument = line
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -78,6 +78,9 @@\n line += next(lines)\n except StopIteration:\n return\n+ if \"@\" in line or re.search(\"https?://\", line):\n+ # skip lines with links like `pesq @ git+https://github.com/ludlows/python-pesq`\n+ continue\n # If there's a pip argument, save it\n if line.startswith(\"--\"):\n pip_argument = line\n", "issue": "0.11.2 and 0.11.3 not released on PyPI\nThe 0.11.2 and 0.11.3 releases have both been released on GitHub, but haven't yet been released on PyPI. Is this planned for the future, or is the release process broken?\n", "before_files": [{"content": "#!/usr/bin/env python\nimport glob\nimport os\nimport re\nfrom functools import partial\nfrom importlib.util import module_from_spec, spec_from_file_location\nfrom itertools import chain\nfrom pathlib import Path\nfrom typing import Any, Iterable, Iterator, List, Optional, Tuple, Union\n\nfrom pkg_resources import Requirement, yield_lines\nfrom setuptools import find_packages, setup\n\n_PATH_ROOT = os.path.realpath(os.path.dirname(__file__))\n_PATH_SOURCE = os.path.join(_PATH_ROOT, \"src\")\n_PATH_REQUIRE = os.path.join(_PATH_ROOT, \"requirements\")\n_FREEZE_REQUIREMENTS = bool(int(os.environ.get(\"FREEZE_REQUIREMENTS\", 0)))\n\n\nclass _RequirementWithComment(Requirement):\n strict_string = \"# strict\"\n\n def __init__(self, *args: Any, comment: str = \"\", pip_argument: Optional[str] = None, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.comment = comment\n assert pip_argument is None or pip_argument # sanity check that it's not an empty str\n self.pip_argument = pip_argument\n self.strict = self.strict_string in comment.lower()\n\n def adjust(self, unfreeze: bool) -> str:\n \"\"\"Remove version restrictions unless they are strict.\n\n >>> _RequirementWithComment(\"arrow<=1.2.2,>=1.2.0\", comment=\"# anything\").adjust(False)\n 'arrow<=1.2.2,>=1.2.0'\n >>> _RequirementWithComment(\"arrow<=1.2.2,>=1.2.0\", comment=\"# strict\").adjust(False)\n 'arrow<=1.2.2,>=1.2.0 # strict'\n >>> _RequirementWithComment(\"arrow<=1.2.2,>=1.2.0\", comment=\"# my name\").adjust(True)\n 'arrow>=1.2.0'\n >>> _RequirementWithComment(\"arrow>=1.2.0, <=1.2.2\", comment=\"# strict\").adjust(True)\n 'arrow<=1.2.2,>=1.2.0 # strict'\n >>> _RequirementWithComment(\"arrow\").adjust(True)\n 'arrow'\n \"\"\"\n out = str(self)\n if self.strict:\n return f\"{out} {self.strict_string}\"\n if unfreeze:\n for operator, version in self.specs:\n if operator in (\"<\", \"<=\"):\n # drop upper bound\n return out.replace(f\"{operator}{version},\", \"\")\n return out\n\n\ndef _parse_requirements(strs: Union[str, Iterable[str]]) -> Iterator[_RequirementWithComment]:\n r\"\"\"Adapted from `pkg_resources.parse_requirements` to include comments.\n\n >>> txt = ['# ignored', '', 'this # is an', '--piparg', 'example', 'foo # strict', 'thing', '-r different/file.txt']\n >>> [r.adjust('none') for r in _parse_requirements(txt)]\n ['this', 'example', 'foo # strict', 'thing']\n >>> txt = '\\\\n'.join(txt)\n >>> [r.adjust('none') for r in _parse_requirements(txt)]\n ['this', 'example', 'foo # strict', 'thing']\n \"\"\"\n lines = yield_lines(strs)\n pip_argument = None\n for line in lines:\n # Drop comments -- a hash without a space may be in a URL.\n if \" #\" in line:\n comment_pos = line.find(\" #\")\n line, comment = line[:comment_pos], line[comment_pos:]\n else:\n comment = \"\"\n # If there is a line continuation, drop it, and append the next line.\n if 
line.endswith(\"\\\\\"):\n line = line[:-2].strip()\n try:\n line += next(lines)\n except StopIteration:\n return\n # If there's a pip argument, save it\n if line.startswith(\"--\"):\n pip_argument = line\n continue\n if line.startswith(\"-r \"):\n # linked requirement files are unsupported\n continue\n yield _RequirementWithComment(line, comment=comment, pip_argument=pip_argument)\n pip_argument = None\n\n\ndef _load_requirements(\n path_dir: str, file_name: str = \"requirements.txt\", unfreeze: bool = not _FREEZE_REQUIREMENTS\n) -> List[str]:\n \"\"\"Load requirements from a file.\n\n >>> _load_requirements(_PATH_ROOT)\n ['numpy...', 'torch...\"]\n \"\"\"\n path = Path(path_dir) / file_name\n assert path.exists(), (path_dir, file_name, path)\n text = path.read_text()\n return [req.adjust(unfreeze) for req in _parse_requirements(text)]\n\n\ndef _load_readme_description(path_dir: str, homepage: str, version: str) -> str:\n \"\"\"Load readme as decribtion.\n\n >>> _load_readme_description(_PATH_ROOT, \"\", \"\")\n '<div align=\"center\">...'\n \"\"\"\n path_readme = os.path.join(path_dir, \"README.md\")\n with open(path_readme, encoding=\"utf-8\") as fp:\n text = fp.read()\n\n # https://github.com/Lightning-AI/torchmetrics/raw/master/docs/source/_static/images/lightning_module/pt_to_pl.png\n github_source_url = os.path.join(homepage, \"raw\", version)\n # replace relative repository path to absolute link to the release\n # do not replace all \"docs\" as in the readme we replace some other sources with particular path to docs\n text = text.replace(\"docs/source/_static/\", f\"{os.path.join(github_source_url, 'docs/source/_static/')}\")\n\n # readthedocs badge\n text = text.replace(\"badge/?version=stable\", f\"badge/?version={version}\")\n text = text.replace(\"torchmetrics.readthedocs.io/en/stable/\", f\"torchmetrics.readthedocs.io/en/{version}\")\n # codecov badge\n text = text.replace(\"/branch/master/graph/badge.svg\", f\"/release/{version}/graph/badge.svg\")\n # replace github badges for release ones\n text = text.replace(\"badge.svg?branch=master&event=push\", f\"badge.svg?tag={version}\")\n # Azure...\n text = text.replace(\"?branchName=master\", f\"?branchName=refs%2Ftags%2F{version}\")\n text = re.sub(r\"\\?definitionId=\\d+&branchName=master\", f\"?definitionId=2&branchName=refs%2Ftags%2F{version}\", text)\n\n skip_begin = r\"<!-- following section will be skipped from PyPI description -->\"\n skip_end = r\"<!-- end skipping PyPI description -->\"\n # todo: wrap content as commented description\n return re.sub(rf\"{skip_begin}.+?{skip_end}\", \"<!-- -->\", text, flags=re.IGNORECASE + re.DOTALL)\n\n\ndef _load_py_module(fname, pkg=\"torchmetrics\"):\n spec = spec_from_file_location(os.path.join(pkg, fname), os.path.join(_PATH_SOURCE, pkg, fname))\n py = module_from_spec(spec)\n spec.loader.exec_module(py)\n return py\n\n\nABOUT = _load_py_module(\"__about__.py\")\nLONG_DESCRIPTION = _load_readme_description(\n _PATH_ROOT,\n homepage=ABOUT.__homepage__,\n version=f\"v{ABOUT.__version__}\",\n)\nBASE_REQUIREMENTS = _load_requirements(path_dir=_PATH_ROOT, file_name=\"requirements.txt\")\n\n\ndef _prepare_extras(skip_files: Tuple[str] = (\"devel.txt\", \"doctest.txt\", \"integrate.txt\", \"docs.txt\")):\n # find all extra requirements\n _load_req = partial(_load_requirements, path_dir=_PATH_REQUIRE)\n found_req_files = sorted(os.path.basename(p) for p in glob.glob(os.path.join(_PATH_REQUIRE, \"*.txt\")))\n # filter unwanted files\n found_req_files = [n for n in found_req_files if n not 
in skip_files]\n found_req_names = [os.path.splitext(req)[0] for req in found_req_files]\n # define basic and extra extras\n extras_req = {\n name: _load_req(file_name=fname) for name, fname in zip(found_req_names, found_req_files) if \"_test\" not in name\n }\n for name, fname in zip(found_req_names, found_req_files):\n if \"_test\" in name:\n extras_req[\"test\"] += _load_req(file_name=fname)\n # filter the uniques\n extras_req = {n: list(set(req)) for n, req in extras_req.items()}\n # create an 'all' keyword that install all possible dependencies\n extras_req[\"all\"] = list(chain([pkgs for k, pkgs in extras_req.items() if k not in (\"test\", \"docs\")]))\n extras_req[\"dev\"] = extras_req[\"all\"] + extras_req[\"test\"]\n return extras_req\n\n\n# https://packaging.python.org/discussions/install-requires-vs-requirements /\n# keep the meta-data here for simplicity in reading this file... it's not obvious\n# what happens and to non-engineers they won't know to look in init ...\n# the goal of the project is simplicity for researchers, don't want to add too much\n# engineer specific practices\nif __name__ == \"__main__\":\n setup(\n name=\"torchmetrics\",\n version=ABOUT.__version__,\n description=ABOUT.__docs__,\n author=ABOUT.__author__,\n author_email=ABOUT.__author_email__,\n url=ABOUT.__homepage__,\n download_url=os.path.join(ABOUT.__homepage__, \"archive\", \"master.zip\"),\n license=ABOUT.__license__,\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n include_package_data=True,\n zip_safe=False,\n keywords=[\"deep learning\", \"machine learning\", \"pytorch\", \"metrics\", \"AI\"],\n python_requires=\">=3.7\",\n setup_requires=[],\n install_requires=BASE_REQUIREMENTS,\n extras_require=_prepare_extras(),\n project_urls={\n \"Bug Tracker\": os.path.join(ABOUT.__homepage__, \"issues\"),\n \"Documentation\": \"https://torchmetrics.rtfd.io/en/latest/\",\n \"Source Code\": ABOUT.__homepage__,\n },\n classifiers=[\n \"Environment :: Console\",\n \"Natural Language :: English\",\n # How mature is this project? Common values are\n # 3 - Alpha, 4 - Beta, 5 - Production/Stable\n \"Development Status :: 5 - Production/Stable\",\n # Indicate who your project is intended for\n \"Intended Audience :: Developers\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Scientific/Engineering :: Image Recognition\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n # Pick your license as you wish\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: OS Independent\",\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n ],\n )\n", "path": "setup.py"}]} | 3,635 | 117 |
gh_patches_debug_12073 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2073 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Absolute paths with backward slashes
@waylan Sorry to bother you again on this, but I have one more small question.
On one of your postings you mentioned a difference between a path (all with "\\") and a link (all with "/").
In mkdocs I see now I've mixed them up completely for linking to -for example- local images. This has always worked without any issue.
However, there is a difference in link resolution between:
```markdown
[absolute url](/imgs/image.png)
[absolute path](\imgs\image.png)
```
The first one results in an absolute HTML link, the second in a relative one. 
Is this behaviour intentional? I hope so, as it will solve an issue I am having. If not, I'd really like to know before building on it and risking that this behaviour will change in the future.
I hope I am not asking the obvious. I've checked the docs but don't seem to find any mention of this.
Thanks. Sander.
</issue>
<code>
[start of mkdocs/structure/pages.py]
1 import os
2 import datetime
3 import logging
4 from urllib.parse import urlparse, urlunparse, urljoin
5 from urllib.parse import unquote as urlunquote
6
7 import markdown
8 from markdown.extensions import Extension
9 from markdown.treeprocessors import Treeprocessor
10 from markdown.util import AMP_SUBSTITUTE
11
12 from mkdocs.structure.toc import get_toc
13 from mkdocs.utils import meta, get_markdown_title, warning_filter
14
15 log = logging.getLogger(__name__)
16 log.addFilter(warning_filter)
17
18
19 class Page:
20 def __init__(self, title, file, config):
21 file.page = self
22 self.file = file
23 self.title = title
24
25 # Navigation attributes
26 self.parent = None
27 self.children = None
28 self.previous_page = None
29 self.next_page = None
30 self.active = False
31
32 self.is_section = False
33 self.is_page = True
34 self.is_link = False
35
36 # Support SOURCE_DATE_EPOCH environment variable for "reproducible" builds.
37 # See https://reproducible-builds.org/specs/source-date-epoch/
38 if 'SOURCE_DATE_EPOCH' in os.environ:
39 self.update_date = datetime.datetime.utcfromtimestamp(
40 int(os.environ['SOURCE_DATE_EPOCH'])
41 ).strftime("%Y-%m-%d")
42 else:
43 self.update_date = datetime.datetime.now().strftime("%Y-%m-%d")
44
45 self._set_canonical_url(config.get('site_url', None))
46 self._set_edit_url(config.get('repo_url', None), config.get('edit_uri', None))
47
48 # Placeholders to be filled in later in the build process.
49 self.markdown = None
50 self.content = None
51 self.toc = []
52 self.meta = {}
53
54 def __eq__(self, other):
55
56 def sub_dict(d):
57 return {key: value for key, value in d.items() if key in ['title', 'file']}
58
59 return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))
60
61 def __ne__(self, other):
62 return not self.__eq__(other)
63
64 def __repr__(self):
65 title = "'{}'".format(self.title) if (self.title is not None) else '[blank]'
66 return "Page(title={}, url='{}')".format(title, self.abs_url or self.file.url)
67
68 def _indent_print(self, depth=0):
69 return '{}{}'.format(' ' * depth, repr(self))
70
71 def _get_active(self):
72 """ Return active status of page. """
73 return self.__active
74
75 def _set_active(self, value):
76 """ Set active status of page and ancestors. """
77 self.__active = bool(value)
78 if self.parent is not None:
79 self.parent.active = bool(value)
80
81 active = property(_get_active, _set_active)
82
83 @property
84 def is_index(self):
85 return self.file.name == 'index'
86
87 @property
88 def is_top_level(self):
89 return self.parent is None
90
91 @property
92 def is_homepage(self):
93 return self.is_top_level and self.is_index and self.file.url == '.'
94
95 @property
96 def url(self):
97 return '' if self.file.url == '.' else self.file.url
98
99 @property
100 def ancestors(self):
101 if self.parent is None:
102 return []
103 return [self.parent] + self.parent.ancestors
104
105 def _set_canonical_url(self, base):
106 if base:
107 if not base.endswith('/'):
108 base += '/'
109 self.canonical_url = urljoin(base, self.url)
110 self.abs_url = urlparse(self.canonical_url).path
111 else:
112 self.canonical_url = None
113 self.abs_url = None
114
115 def _set_edit_url(self, repo_url, edit_uri):
116 if repo_url and edit_uri:
117 src_path = self.file.src_path.replace('\\', '/')
118 self.edit_url = urljoin(repo_url, edit_uri + src_path)
119 else:
120 self.edit_url = None
121
122 def read_source(self, config):
123 source = config['plugins'].run_event(
124 'page_read_source', page=self, config=config
125 )
126 if source is None:
127 try:
128 with open(self.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:
129 source = f.read()
130 except OSError:
131 log.error('File not found: {}'.format(self.file.src_path))
132 raise
133 except ValueError:
134 log.error('Encoding error reading file: {}'.format(self.file.src_path))
135 raise
136
137 self.markdown, self.meta = meta.get_data(source)
138 self._set_title()
139
140 def _set_title(self):
141 """
142 Set the title for a Markdown document.
143
144 Check these in order and use the first that returns a valid title:
145 - value provided on init (passed in from config)
146 - value of metadata 'title'
147 - content of the first H1 in Markdown content
148 - convert filename to title
149 """
150 if self.title is not None:
151 return
152
153 if 'title' in self.meta:
154 self.title = self.meta['title']
155 return
156
157 title = get_markdown_title(self.markdown)
158
159 if title is None:
160 if self.is_homepage:
161 title = 'Home'
162 else:
163 title = self.file.name.replace('-', ' ').replace('_', ' ')
164 # Capitalize if the filename was all lowercase, otherwise leave it as-is.
165 if title.lower() == title:
166 title = title.capitalize()
167
168 self.title = title
169
170 def render(self, config, files):
171 """
172 Convert the Markdown source file to HTML as per the config.
173 """
174
175 extensions = [
176 _RelativePathExtension(self.file, files)
177 ] + config['markdown_extensions']
178
179 md = markdown.Markdown(
180 extensions=extensions,
181 extension_configs=config['mdx_configs'] or {}
182 )
183 self.content = md.convert(self.markdown)
184 self.toc = get_toc(getattr(md, 'toc_tokens', []))
185
186
187 class _RelativePathTreeprocessor(Treeprocessor):
188 def __init__(self, file, files):
189 self.file = file
190 self.files = files
191
192 def run(self, root):
193 """
194 Update urls on anchors and images to make them relative
195
196 Iterates through the full document tree looking for specific
197 tags and then makes them relative based on the site navigation
198 """
199 for element in root.iter():
200 if element.tag == 'a':
201 key = 'href'
202 elif element.tag == 'img':
203 key = 'src'
204 else:
205 continue
206
207 url = element.get(key)
208 new_url = self.path_to_url(url)
209 element.set(key, new_url)
210
211 return root
212
213 def path_to_url(self, url):
214 scheme, netloc, path, params, query, fragment = urlparse(url)
215
216 if (scheme or netloc or not path or url.startswith('/')
217 or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):
218 # Ignore URLs unless they are a relative link to a source file.
219 # AMP_SUBSTITUTE is used internally by Markdown only for email.
220 # No '.' in the last part of a path indicates path does not point to a file.
221 return url
222
223 # Determine the filepath of the target.
224 target_path = os.path.join(os.path.dirname(self.file.src_path), urlunquote(path))
225 target_path = os.path.normpath(target_path).lstrip(os.sep)
226
227 # Validate that the target exists in files collection.
228 if target_path not in self.files:
229 log.warning(
230 "Documentation file '{}' contains a link to '{}' which is not found "
231 "in the documentation files.".format(self.file.src_path, target_path)
232 )
233 return url
234 target_file = self.files.get_file_from_path(target_path)
235 path = target_file.url_relative_to(self.file)
236 components = (scheme, netloc, path, params, query, fragment)
237 return urlunparse(components)
238
239
240 class _RelativePathExtension(Extension):
241 """
242 The Extension class is what we pass to markdown, it then
243 registers the Treeprocessor.
244 """
245
246 def __init__(self, file, files):
247 self.file = file
248 self.files = files
249
250 def extendMarkdown(self, md):
251 relpath = _RelativePathTreeprocessor(self.file, self.files)
252 md.treeprocessors.register(relpath, "relpath", 0)
253
[end of mkdocs/structure/pages.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mkdocs/structure/pages.py b/mkdocs/structure/pages.py
--- a/mkdocs/structure/pages.py
+++ b/mkdocs/structure/pages.py
@@ -213,7 +213,7 @@
def path_to_url(self, url):
scheme, netloc, path, params, query, fragment = urlparse(url)
- if (scheme or netloc or not path or url.startswith('/')
+ if (scheme or netloc or not path or url.startswith('/') or url.startswith('\\')
or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):
# Ignore URLs unless they are a relative link to a source file.
# AMP_SUBSTITUTE is used internally by Markdown only for email.
| {"golden_diff": "diff --git a/mkdocs/structure/pages.py b/mkdocs/structure/pages.py\n--- a/mkdocs/structure/pages.py\n+++ b/mkdocs/structure/pages.py\n@@ -213,7 +213,7 @@\n def path_to_url(self, url):\n scheme, netloc, path, params, query, fragment = urlparse(url)\n \n- if (scheme or netloc or not path or url.startswith('/')\n+ if (scheme or netloc or not path or url.startswith('/') or url.startswith('\\\\')\n or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):\n # Ignore URLs unless they are a relative link to a source file.\n # AMP_SUBSTITUTE is used internally by Markdown only for email.\n", "issue": "Absolute paths with backward slashes\n@waylan Sorry you to bother you again on this, but I have one more small question.\r\n\r\nOn one of your postings you mentioned a difference between a path (all with \"\\\\\") and a link (all with \"/\").\r\nIn mkdocs I see now I've mixed them up completely for linking to -for example- local images. This has always worked without any issue.\r\n\r\nHowever, there is a difference in link resolution between:\r\n\r\n```markdown\r\n[absolute url](/imgs/image.png)\r\n\r\n[absolute path](\\imgs\\image.png)\r\n```\r\nFirst one results in a absolute html link. The second a relative one. \r\nIs is behaviour intentional ? I hope so as it will solve an issue I am having. If not I'd really like to know before building on it while running the risk of this behaviour being changed in the future.\r\n\r\nI hope I am not asking the obvious. I've checked the docs but don't seem to find any mention of this.\r\n\r\nThanks. Sander.\n", "before_files": [{"content": "import os\nimport datetime\nimport logging\nfrom urllib.parse import urlparse, urlunparse, urljoin\nfrom urllib.parse import unquote as urlunquote\n\nimport markdown\nfrom markdown.extensions import Extension\nfrom markdown.treeprocessors import Treeprocessor\nfrom markdown.util import AMP_SUBSTITUTE\n\nfrom mkdocs.structure.toc import get_toc\nfrom mkdocs.utils import meta, get_markdown_title, warning_filter\n\nlog = logging.getLogger(__name__)\nlog.addFilter(warning_filter)\n\n\nclass Page:\n def __init__(self, title, file, config):\n file.page = self\n self.file = file\n self.title = title\n\n # Navigation attributes\n self.parent = None\n self.children = None\n self.previous_page = None\n self.next_page = None\n self.active = False\n\n self.is_section = False\n self.is_page = True\n self.is_link = False\n\n # Support SOURCE_DATE_EPOCH environment variable for \"reproducible\" builds.\n # See https://reproducible-builds.org/specs/source-date-epoch/\n if 'SOURCE_DATE_EPOCH' in os.environ:\n self.update_date = datetime.datetime.utcfromtimestamp(\n int(os.environ['SOURCE_DATE_EPOCH'])\n ).strftime(\"%Y-%m-%d\")\n else:\n self.update_date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\n\n self._set_canonical_url(config.get('site_url', None))\n self._set_edit_url(config.get('repo_url', None), config.get('edit_uri', None))\n\n # Placeholders to be filled in later in the build process.\n self.markdown = None\n self.content = None\n self.toc = []\n self.meta = {}\n\n def __eq__(self, other):\n\n def sub_dict(d):\n return {key: value for key, value in d.items() if key in ['title', 'file']}\n\n return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __repr__(self):\n title = \"'{}'\".format(self.title) if (self.title is not None) else '[blank]'\n return \"Page(title={}, 
url='{}')\".format(title, self.abs_url or self.file.url)\n\n def _indent_print(self, depth=0):\n return '{}{}'.format(' ' * depth, repr(self))\n\n def _get_active(self):\n \"\"\" Return active status of page. \"\"\"\n return self.__active\n\n def _set_active(self, value):\n \"\"\" Set active status of page and ancestors. \"\"\"\n self.__active = bool(value)\n if self.parent is not None:\n self.parent.active = bool(value)\n\n active = property(_get_active, _set_active)\n\n @property\n def is_index(self):\n return self.file.name == 'index'\n\n @property\n def is_top_level(self):\n return self.parent is None\n\n @property\n def is_homepage(self):\n return self.is_top_level and self.is_index and self.file.url == '.'\n\n @property\n def url(self):\n return '' if self.file.url == '.' else self.file.url\n\n @property\n def ancestors(self):\n if self.parent is None:\n return []\n return [self.parent] + self.parent.ancestors\n\n def _set_canonical_url(self, base):\n if base:\n if not base.endswith('/'):\n base += '/'\n self.canonical_url = urljoin(base, self.url)\n self.abs_url = urlparse(self.canonical_url).path\n else:\n self.canonical_url = None\n self.abs_url = None\n\n def _set_edit_url(self, repo_url, edit_uri):\n if repo_url and edit_uri:\n src_path = self.file.src_path.replace('\\\\', '/')\n self.edit_url = urljoin(repo_url, edit_uri + src_path)\n else:\n self.edit_url = None\n\n def read_source(self, config):\n source = config['plugins'].run_event(\n 'page_read_source', page=self, config=config\n )\n if source is None:\n try:\n with open(self.file.abs_src_path, 'r', encoding='utf-8-sig', errors='strict') as f:\n source = f.read()\n except OSError:\n log.error('File not found: {}'.format(self.file.src_path))\n raise\n except ValueError:\n log.error('Encoding error reading file: {}'.format(self.file.src_path))\n raise\n\n self.markdown, self.meta = meta.get_data(source)\n self._set_title()\n\n def _set_title(self):\n \"\"\"\n Set the title for a Markdown document.\n\n Check these in order and use the first that returns a valid title:\n - value provided on init (passed in from config)\n - value of metadata 'title'\n - content of the first H1 in Markdown content\n - convert filename to title\n \"\"\"\n if self.title is not None:\n return\n\n if 'title' in self.meta:\n self.title = self.meta['title']\n return\n\n title = get_markdown_title(self.markdown)\n\n if title is None:\n if self.is_homepage:\n title = 'Home'\n else:\n title = self.file.name.replace('-', ' ').replace('_', ' ')\n # Capitalize if the filename was all lowercase, otherwise leave it as-is.\n if title.lower() == title:\n title = title.capitalize()\n\n self.title = title\n\n def render(self, config, files):\n \"\"\"\n Convert the Markdown source file to HTML as per the config.\n \"\"\"\n\n extensions = [\n _RelativePathExtension(self.file, files)\n ] + config['markdown_extensions']\n\n md = markdown.Markdown(\n extensions=extensions,\n extension_configs=config['mdx_configs'] or {}\n )\n self.content = md.convert(self.markdown)\n self.toc = get_toc(getattr(md, 'toc_tokens', []))\n\n\nclass _RelativePathTreeprocessor(Treeprocessor):\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def run(self, root):\n \"\"\"\n Update urls on anchors and images to make them relative\n\n Iterates through the full document tree looking for specific\n tags and then makes them relative based on the site navigation\n \"\"\"\n for element in root.iter():\n if element.tag == 'a':\n key = 'href'\n elif element.tag == 
'img':\n key = 'src'\n else:\n continue\n\n url = element.get(key)\n new_url = self.path_to_url(url)\n element.set(key, new_url)\n\n return root\n\n def path_to_url(self, url):\n scheme, netloc, path, params, query, fragment = urlparse(url)\n\n if (scheme or netloc or not path or url.startswith('/')\n or AMP_SUBSTITUTE in url or '.' not in os.path.split(path)[-1]):\n # Ignore URLs unless they are a relative link to a source file.\n # AMP_SUBSTITUTE is used internally by Markdown only for email.\n # No '.' in the last part of a path indicates path does not point to a file.\n return url\n\n # Determine the filepath of the target.\n target_path = os.path.join(os.path.dirname(self.file.src_path), urlunquote(path))\n target_path = os.path.normpath(target_path).lstrip(os.sep)\n\n # Validate that the target exists in files collection.\n if target_path not in self.files:\n log.warning(\n \"Documentation file '{}' contains a link to '{}' which is not found \"\n \"in the documentation files.\".format(self.file.src_path, target_path)\n )\n return url\n target_file = self.files.get_file_from_path(target_path)\n path = target_file.url_relative_to(self.file)\n components = (scheme, netloc, path, params, query, fragment)\n return urlunparse(components)\n\n\nclass _RelativePathExtension(Extension):\n \"\"\"\n The Extension class is what we pass to markdown, it then\n registers the Treeprocessor.\n \"\"\"\n\n def __init__(self, file, files):\n self.file = file\n self.files = files\n\n def extendMarkdown(self, md):\n relpath = _RelativePathTreeprocessor(self.file, self.files)\n md.treeprocessors.register(relpath, \"relpath\", 0)\n", "path": "mkdocs/structure/pages.py"}]} | 3,249 | 163 |
gh_patches_debug_24477 | rasdani/github-patches | git_diff | wemake-services__wemake-python-styleguide-2529 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
609: Allow __enter__() inside an __enter__()
### What's wrong
One design pattern is to wrap a context manager. It would be nice to avoid WPS609 errors with this code, which seems to require accessing the direct magic methods.
### How it should be
Allow code like:
```
class Foo:
...
def __enter__(self):
self._conn.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._conn.__exit__(exc_type, exc_value, traceback)
```
I guess the same for aenter/aexit as well.
</issue>
<code>
[start of wemake_python_styleguide/visitors/ast/attributes.py]
1 import ast
2 from typing import ClassVar, FrozenSet
3
4 from typing_extensions import final
5
6 from wemake_python_styleguide.constants import ALL_MAGIC_METHODS
7 from wemake_python_styleguide.logic.naming import access
8 from wemake_python_styleguide.violations.best_practices import (
9 ProtectedAttributeViolation,
10 )
11 from wemake_python_styleguide.violations.oop import (
12 DirectMagicAttributeAccessViolation,
13 )
14 from wemake_python_styleguide.visitors.base import BaseNodeVisitor
15
16
17 @final
18 class WrongAttributeVisitor(BaseNodeVisitor):
19 """Ensures that attributes are used correctly."""
20
21 _allowed_to_use_protected: ClassVar[FrozenSet[str]] = frozenset((
22 'self',
23 'cls',
24 'mcs',
25 ))
26
27 def visit_Attribute(self, node: ast.Attribute) -> None:
28 """Checks the `Attribute` node."""
29 self._check_protected_attribute(node)
30 self._check_magic_attribute(node)
31 self.generic_visit(node)
32
33 def _is_super_called(self, node: ast.Call) -> bool:
34 return isinstance(node.func, ast.Name) and node.func.id == 'super'
35
36 def _ensure_attribute_type(self, node: ast.Attribute, exception) -> None:
37 if isinstance(node.value, ast.Name):
38 if node.value.id in self._allowed_to_use_protected:
39 return
40
41 if isinstance(node.value, ast.Call):
42 if self._is_super_called(node.value):
43 return
44
45 self.add_violation(exception(node, text=node.attr))
46
47 def _check_protected_attribute(self, node: ast.Attribute) -> None:
48 if access.is_protected(node.attr):
49 self._ensure_attribute_type(node, ProtectedAttributeViolation)
50
51 def _check_magic_attribute(self, node: ast.Attribute) -> None:
52 if access.is_magic(node.attr):
53 if node.attr in ALL_MAGIC_METHODS:
54 self._ensure_attribute_type(
55 node, DirectMagicAttributeAccessViolation,
56 )
57
[end of wemake_python_styleguide/visitors/ast/attributes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wemake_python_styleguide/visitors/ast/attributes.py b/wemake_python_styleguide/visitors/ast/attributes.py
--- a/wemake_python_styleguide/visitors/ast/attributes.py
+++ b/wemake_python_styleguide/visitors/ast/attributes.py
@@ -3,7 +3,9 @@
from typing_extensions import final
+from wemake_python_styleguide.compat.aliases import FunctionNodes
from wemake_python_styleguide.constants import ALL_MAGIC_METHODS
+from wemake_python_styleguide.logic import nodes
from wemake_python_styleguide.logic.naming import access
from wemake_python_styleguide.violations.best_practices import (
ProtectedAttributeViolation,
@@ -50,6 +52,15 @@
def _check_magic_attribute(self, node: ast.Attribute) -> None:
if access.is_magic(node.attr):
+ # If "magic" method being called has the same name as
+ # the enclosing function, then it is a "wrapper" and thus
+ # a "false positive".
+
+ ctx = nodes.get_context(node)
+ if isinstance(ctx, FunctionNodes):
+ if node.attr == ctx.name:
+ return
+
if node.attr in ALL_MAGIC_METHODS:
self._ensure_attribute_type(
node, DirectMagicAttributeAccessViolation,
| {"golden_diff": "diff --git a/wemake_python_styleguide/visitors/ast/attributes.py b/wemake_python_styleguide/visitors/ast/attributes.py\n--- a/wemake_python_styleguide/visitors/ast/attributes.py\n+++ b/wemake_python_styleguide/visitors/ast/attributes.py\n@@ -3,7 +3,9 @@\n \n from typing_extensions import final\n \n+from wemake_python_styleguide.compat.aliases import FunctionNodes\n from wemake_python_styleguide.constants import ALL_MAGIC_METHODS\n+from wemake_python_styleguide.logic import nodes\n from wemake_python_styleguide.logic.naming import access\n from wemake_python_styleguide.violations.best_practices import (\n ProtectedAttributeViolation,\n@@ -50,6 +52,15 @@\n \n def _check_magic_attribute(self, node: ast.Attribute) -> None:\n if access.is_magic(node.attr):\n+ # If \"magic\" method being called has the same name as\n+ # the enclosing function, then it is a \"wrapper\" and thus\n+ # a \"false positive\".\n+\n+ ctx = nodes.get_context(node)\n+ if isinstance(ctx, FunctionNodes):\n+ if node.attr == ctx.name:\n+ return\n+\n if node.attr in ALL_MAGIC_METHODS:\n self._ensure_attribute_type(\n node, DirectMagicAttributeAccessViolation,\n", "issue": "609: Allow __enter__() inside an __enter__()\n### What's wrong\r\n\r\nOne design pattern is to wrap a context manager. It would be nice to avoid WPS609 errors with this code, which seems to require accessing the direct magic methods.\r\n\r\n### How it should be\r\n\r\nAllow code like:\r\n```\r\nclass Foo:\r\n ...\r\n\r\n def __enter__(self):\r\n self._conn.__enter__()\r\n return self\r\n\r\n def __exit__(self, exc_type, exc_value, traceback):\r\n self._conn.__exit__(exc_type, exc_value, traceback)\r\n```\r\n\r\nI guess the same for aenter/aexit as well.\n", "before_files": [{"content": "import ast\nfrom typing import ClassVar, FrozenSet\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.constants import ALL_MAGIC_METHODS\nfrom wemake_python_styleguide.logic.naming import access\nfrom wemake_python_styleguide.violations.best_practices import (\n ProtectedAttributeViolation,\n)\nfrom wemake_python_styleguide.violations.oop import (\n DirectMagicAttributeAccessViolation,\n)\nfrom wemake_python_styleguide.visitors.base import BaseNodeVisitor\n\n\n@final\nclass WrongAttributeVisitor(BaseNodeVisitor):\n \"\"\"Ensures that attributes are used correctly.\"\"\"\n\n _allowed_to_use_protected: ClassVar[FrozenSet[str]] = frozenset((\n 'self',\n 'cls',\n 'mcs',\n ))\n\n def visit_Attribute(self, node: ast.Attribute) -> None:\n \"\"\"Checks the `Attribute` node.\"\"\"\n self._check_protected_attribute(node)\n self._check_magic_attribute(node)\n self.generic_visit(node)\n\n def _is_super_called(self, node: ast.Call) -> bool:\n return isinstance(node.func, ast.Name) and node.func.id == 'super'\n\n def _ensure_attribute_type(self, node: ast.Attribute, exception) -> None:\n if isinstance(node.value, ast.Name):\n if node.value.id in self._allowed_to_use_protected:\n return\n\n if isinstance(node.value, ast.Call):\n if self._is_super_called(node.value):\n return\n\n self.add_violation(exception(node, text=node.attr))\n\n def _check_protected_attribute(self, node: ast.Attribute) -> None:\n if access.is_protected(node.attr):\n self._ensure_attribute_type(node, ProtectedAttributeViolation)\n\n def _check_magic_attribute(self, node: ast.Attribute) -> None:\n if access.is_magic(node.attr):\n if node.attr in ALL_MAGIC_METHODS:\n self._ensure_attribute_type(\n node, DirectMagicAttributeAccessViolation,\n )\n", "path": 
"wemake_python_styleguide/visitors/ast/attributes.py"}]} | 1,213 | 297 |
gh_patches_debug_21144 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-3294 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
</issue>
<code>
[start of applications/Chat/inference/server.py]
1 import argparse
2 import os
3 from threading import Lock
4 from typing import Dict, Generator, List, Optional
5
6 import torch
7 import uvicorn
8 from fastapi import FastAPI, HTTPException, Request
9 from fastapi.middleware.cors import CORSMiddleware
10 from llama_gptq import load_quant
11 from pydantic import BaseModel, Field
12 from slowapi import Limiter, _rate_limit_exceeded_handler
13 from slowapi.errors import RateLimitExceeded
14 from slowapi.util import get_remote_address
15 from sse_starlette.sse import EventSourceResponse
16 from transformers import AutoTokenizer, GenerationConfig, LlamaForCausalLM
17 from utils import ChatPromptProcessor, Dialogue, LockedIterator, sample_streamingly, update_model_kwargs_fn
18
19 CONTEXT = 'Below is an instruction that describes a task. Write a response that appropriately completes the request. Do not generate new instructions.'
20 MAX_LEN = 512
21 running_lock = Lock()
22
23
24 class GenerationTaskReq(BaseModel):
25 max_new_tokens: int = Field(gt=0, le=512, example=64)
26 history: List[Dialogue] = Field(min_items=1)
27 top_k: Optional[int] = Field(default=None, gt=0, example=50)
28 top_p: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.5)
29 temperature: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.7)
30
31
32 limiter = Limiter(key_func=get_remote_address)
33 app = FastAPI()
34 app.state.limiter = limiter
35 app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)
36
37 # set CORS
38 origin_spec_from_env = os.environ.get('CORS_ORIGIN', None)
39
40 if origin_spec_from_env is not None:
41 # allow CORS from the specified origins
42 origins = os.environ['CORS_ORIGIN'].split(',')
43 else:
44 # allow CORS from all origins
45 origins = ["*"]
46
47 app.add_middleware(
48 CORSMiddleware,
49 allow_origins=origins,
50 allow_credentials=True,
51 allow_methods=["*"],
52 allow_headers=["*"],
53 )
54
55
56 def generate_streamingly(prompt, max_new_tokens, top_k, top_p, temperature):
57 inputs = {k: v.cuda() for k, v in tokenizer(prompt, return_tensors="pt").items()}
58 model_kwargs = {
59 'max_generate_tokens': max_new_tokens,
60 'early_stopping': True,
61 'top_k': top_k,
62 'top_p': top_p,
63 'temperature': temperature,
64 'prepare_inputs_fn': model.prepare_inputs_for_generation,
65 'update_model_kwargs_fn': update_model_kwargs_fn,
66 }
67 is_first_word = True
68 generator = LockedIterator(sample_streamingly(model, **inputs, **model_kwargs), running_lock)
69 for output in generator:
70 output = output.cpu()
71 tokens = tokenizer.convert_ids_to_tokens(output, skip_special_tokens=True)
72 current_sub_tokens = []
73 for token in tokens:
74 if token in tokenizer.all_special_tokens:
75 continue
76 current_sub_tokens.append(token)
77 if current_sub_tokens:
78 out_string = tokenizer.sp_model.decode(current_sub_tokens)
79 if is_first_word:
80 out_string = out_string.lstrip()
81 is_first_word = False
82 elif current_sub_tokens[0].startswith('▁'):
83 # whitespace will be ignored by the frontend
84 out_string = ' ' + out_string
85 yield out_string
86
87
88 async def event_generator(request: Request, generator: Generator):
89 while True:
90 if await request.is_disconnected():
91 break
92 try:
93 yield {'event': 'generate', 'data': next(generator)}
94 except StopIteration:
95 yield {'event': 'end', 'data': ''}
96 break
97
98
99 @app.post('/generate/stream')
100 @limiter.limit('1/second')
101 def generate(data: GenerationTaskReq, request: Request):
102 prompt = prompt_processor.preprocess_prompt(data.history, data.max_new_tokens)
103 event_source = event_generator(
104 request, generate_streamingly(prompt, data.max_new_tokens, data.top_k, data.top_p, data.temperature))
105 return EventSourceResponse(event_source)
106
107
108 @app.post('/generate')
109 @limiter.limit('1/second')
110 def generate_no_stream(data: GenerationTaskReq, request: Request):
111 prompt = prompt_processor.preprocess_prompt(data.history, data.max_new_tokens)
112 inputs = {k: v.cuda() for k, v in tokenizer(prompt, return_tensors="pt").items()}
113 with running_lock:
114 output = model.generate(**inputs, **data.dict(exclude={'history'}))
115 output = output.cpu()
116 prompt_len = inputs['input_ids'].size(1)
117 response = output[0, prompt_len:]
118 out_string = tokenizer.decode(response, skip_special_tokens=True)
119 return prompt_processor.postprocess_output(out_string)
120
121
122 if __name__ == '__main__':
123 parser = argparse.ArgumentParser()
124 parser.add_argument(
125 'pretrained',
126 help='Path to pretrained model. Can be a local path or a model name from the HuggingFace model hub.')
127 parser.add_argument('--quant',
128 choices=['8bit', '4bit'],
129 default=None,
130 help='Quantization mode. Default: None (no quantization, fp16).')
131 parser.add_argument(
132 '--gptq_checkpoint',
133 default=None,
134 help='Path to GPTQ checkpoint. This is only useful when quantization mode is 4bit. Default: None.')
135 parser.add_argument('--gptq_group_size',
136 type=int,
137 default=128,
138 help='Group size for GPTQ. This is only useful when quantization mode is 4bit. Default: 128.')
139 parser.add_argument('--http_host', default='0.0.0.0')
140 parser.add_argument('--http_port', type=int, default=7070)
141 args = parser.parse_args()
142
143 if args.quant == '4bit':
144 assert args.gptq_checkpoint is not None, 'Please specify a GPTQ checkpoint.'
145
146 tokenizer = AutoTokenizer.from_pretrained(args.pretrained)
147 prompt_processor = ChatPromptProcessor(tokenizer, CONTEXT, MAX_LEN)
148
149 if args.quant == '4bit':
150 model = load_quant(args.pretrained, args.gptq_checkpoint, 4, args.gptq_group_size)
151 model.cuda()
152 else:
153 model = LlamaForCausalLM.from_pretrained(
154 args.pretrained,
155 load_in_8bit=(args.quant == '8bit'),
156 torch_dtype=torch.float16,
157 device_map="auto",
158 )
159 if args.quant != '8bit':
160 model.half() # seems to fix bugs for some users.
161 model.eval()
162
163 config = uvicorn.Config(app, host=args.http_host, port=args.http_port)
164 server = uvicorn.Server(config=config)
165 server.run()
166
[end of applications/Chat/inference/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/applications/Chat/inference/server.py b/applications/Chat/inference/server.py
--- a/applications/Chat/inference/server.py
+++ b/applications/Chat/inference/server.py
@@ -27,6 +27,7 @@
top_k: Optional[int] = Field(default=None, gt=0, example=50)
top_p: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.5)
temperature: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.7)
+ repetition_penalty: Optional[float] = Field(default=None, gt=1.0, example=1.2)
limiter = Limiter(key_func=get_remote_address)
@@ -55,6 +56,7 @@
def generate_streamingly(prompt, max_new_tokens, top_k, top_p, temperature):
inputs = {k: v.cuda() for k, v in tokenizer(prompt, return_tensors="pt").items()}
+ #TODO(ver217): streaming generation does not support repetition_penalty now
model_kwargs = {
'max_generate_tokens': max_new_tokens,
'early_stopping': True,
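
Editorial note: the golden diff above only declares the new repetition_penalty request field and records (via the TODO) that the streaming path cannot use it yet. The sketch below is not part of the patch; it is one way the non-streaming endpoint could forward such a field to generate(), assuming pydantic v1 as used in server.py. The helper name, the simplified history field, and exclude_none are assumptions for illustration.

# Illustrative sketch only -- not part of the repository's patch.
from typing import List, Optional
from pydantic import BaseModel, Field

class GenerationTaskReq(BaseModel):
    max_new_tokens: int = Field(gt=0, le=512, example=64)
    history: List[str] = Field(default_factory=list)  # simplified stand-in for List[Dialogue]
    repetition_penalty: Optional[float] = Field(default=None, gt=1.0, example=1.2)

def build_generate_kwargs(req: GenerationTaskReq) -> dict:
    # Drop the chat history and any unset optional fields so that
    # transformers' generate() falls back to its own defaults.
    return req.dict(exclude={"history"}, exclude_none=True)

print(build_generate_kwargs(GenerationTaskReq(max_new_tokens=64, repetition_penalty=1.2)))
# {'max_new_tokens': 64, 'repetition_penalty': 1.2}
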
| {"golden_diff": "diff --git a/applications/Chat/inference/server.py b/applications/Chat/inference/server.py\n--- a/applications/Chat/inference/server.py\n+++ b/applications/Chat/inference/server.py\n@@ -27,6 +27,7 @@\n top_k: Optional[int] = Field(default=None, gt=0, example=50)\n top_p: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.5)\n temperature: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.7)\n+ repetition_penalty: Optional[float] = Field(default=None, gt=1.0, example=1.2)\n \n \n limiter = Limiter(key_func=get_remote_address)\n@@ -55,6 +56,7 @@\n \n def generate_streamingly(prompt, max_new_tokens, top_k, top_p, temperature):\n inputs = {k: v.cuda() for k, v in tokenizer(prompt, return_tensors=\"pt\").items()}\n+ #TODO(ver217): streaming generation does not support repetition_penalty now\n model_kwargs = {\n 'max_generate_tokens': max_new_tokens,\n 'early_stopping': True,\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import argparse\nimport os\nfrom threading import Lock\nfrom typing import Dict, Generator, List, Optional\n\nimport torch\nimport uvicorn\nfrom fastapi import FastAPI, HTTPException, Request\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom llama_gptq import load_quant\nfrom pydantic import BaseModel, Field\nfrom slowapi import Limiter, _rate_limit_exceeded_handler\nfrom slowapi.errors import RateLimitExceeded\nfrom slowapi.util import get_remote_address\nfrom sse_starlette.sse import EventSourceResponse\nfrom transformers import AutoTokenizer, GenerationConfig, LlamaForCausalLM\nfrom utils import ChatPromptProcessor, Dialogue, LockedIterator, sample_streamingly, update_model_kwargs_fn\n\nCONTEXT = 'Below is an instruction that describes a task. Write a response that appropriately completes the request. 
Do not generate new instructions.'\nMAX_LEN = 512\nrunning_lock = Lock()\n\n\nclass GenerationTaskReq(BaseModel):\n max_new_tokens: int = Field(gt=0, le=512, example=64)\n history: List[Dialogue] = Field(min_items=1)\n top_k: Optional[int] = Field(default=None, gt=0, example=50)\n top_p: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.5)\n temperature: Optional[float] = Field(default=None, gt=0.0, lt=1.0, example=0.7)\n\n\nlimiter = Limiter(key_func=get_remote_address)\napp = FastAPI()\napp.state.limiter = limiter\napp.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)\n\n# set CORS\norigin_spec_from_env = os.environ.get('CORS_ORIGIN', None)\n\nif origin_spec_from_env is not None:\n # allow CORS from the specified origins\n origins = os.environ['CORS_ORIGIN'].split(',')\nelse:\n # allow CORS from all origins\n origins = [\"*\"]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\ndef generate_streamingly(prompt, max_new_tokens, top_k, top_p, temperature):\n inputs = {k: v.cuda() for k, v in tokenizer(prompt, return_tensors=\"pt\").items()}\n model_kwargs = {\n 'max_generate_tokens': max_new_tokens,\n 'early_stopping': True,\n 'top_k': top_k,\n 'top_p': top_p,\n 'temperature': temperature,\n 'prepare_inputs_fn': model.prepare_inputs_for_generation,\n 'update_model_kwargs_fn': update_model_kwargs_fn,\n }\n is_first_word = True\n generator = LockedIterator(sample_streamingly(model, **inputs, **model_kwargs), running_lock)\n for output in generator:\n output = output.cpu()\n tokens = tokenizer.convert_ids_to_tokens(output, skip_special_tokens=True)\n current_sub_tokens = []\n for token in tokens:\n if token in tokenizer.all_special_tokens:\n continue\n current_sub_tokens.append(token)\n if current_sub_tokens:\n out_string = tokenizer.sp_model.decode(current_sub_tokens)\n if is_first_word:\n out_string = out_string.lstrip()\n is_first_word = False\n elif current_sub_tokens[0].startswith('\u2581'):\n # whitespace will be ignored by the frontend\n out_string = ' ' + out_string\n yield out_string\n\n\nasync def event_generator(request: Request, generator: Generator):\n while True:\n if await request.is_disconnected():\n break\n try:\n yield {'event': 'generate', 'data': next(generator)}\n except StopIteration:\n yield {'event': 'end', 'data': ''}\n break\n\n\[email protected]('/generate/stream')\[email protected]('1/second')\ndef generate(data: GenerationTaskReq, request: Request):\n prompt = prompt_processor.preprocess_prompt(data.history, data.max_new_tokens)\n event_source = event_generator(\n request, generate_streamingly(prompt, data.max_new_tokens, data.top_k, data.top_p, data.temperature))\n return EventSourceResponse(event_source)\n\n\[email protected]('/generate')\[email protected]('1/second')\ndef generate_no_stream(data: GenerationTaskReq, request: Request):\n prompt = prompt_processor.preprocess_prompt(data.history, data.max_new_tokens)\n inputs = {k: v.cuda() for k, v in tokenizer(prompt, return_tensors=\"pt\").items()}\n with running_lock:\n output = model.generate(**inputs, **data.dict(exclude={'history'}))\n output = output.cpu()\n prompt_len = inputs['input_ids'].size(1)\n response = output[0, prompt_len:]\n out_string = tokenizer.decode(response, skip_special_tokens=True)\n return prompt_processor.postprocess_output(out_string)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'pretrained',\n help='Path to 
pretrained model. Can be a local path or a model name from the HuggingFace model hub.')\n parser.add_argument('--quant',\n choices=['8bit', '4bit'],\n default=None,\n help='Quantization mode. Default: None (no quantization, fp16).')\n parser.add_argument(\n '--gptq_checkpoint',\n default=None,\n help='Path to GPTQ checkpoint. This is only useful when quantization mode is 4bit. Default: None.')\n parser.add_argument('--gptq_group_size',\n type=int,\n default=128,\n help='Group size for GPTQ. This is only useful when quantization mode is 4bit. Default: 128.')\n parser.add_argument('--http_host', default='0.0.0.0')\n parser.add_argument('--http_port', type=int, default=7070)\n args = parser.parse_args()\n\n if args.quant == '4bit':\n assert args.gptq_checkpoint is not None, 'Please specify a GPTQ checkpoint.'\n\n tokenizer = AutoTokenizer.from_pretrained(args.pretrained)\n prompt_processor = ChatPromptProcessor(tokenizer, CONTEXT, MAX_LEN)\n\n if args.quant == '4bit':\n model = load_quant(args.pretrained, args.gptq_checkpoint, 4, args.gptq_group_size)\n model.cuda()\n else:\n model = LlamaForCausalLM.from_pretrained(\n args.pretrained,\n load_in_8bit=(args.quant == '8bit'),\n torch_dtype=torch.float16,\n device_map=\"auto\",\n )\n if args.quant != '8bit':\n model.half() # seems to fix bugs for some users.\n model.eval()\n\n config = uvicorn.Config(app, host=args.http_host, port=args.http_port)\n server = uvicorn.Server(config=config)\n server.run()\n", "path": "applications/Chat/inference/server.py"}]} | 2,432 | 269 |
gh_patches_debug_18253 | rasdani/github-patches | git_diff | google__jax-1473 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
advi.py example is broken with jit
advi.py example is broken with jit: @partial(jit, static_argnums=(0, 1, 2, 4))
If jit is removed, then it works.
</issue>
<code>
[start of examples/advi.py]
1 # Copyright 2018 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Automatic differentiation variational inference in Numpy and JAX.
16
17 This demo fits a Gaussian approximation to an intractable, unnormalized
18 density, by differentiating through a Monte Carlo estimate of the
19 variational evidence lower bound (ELBO)."""
20
21
22 from functools import partial
23 import matplotlib.pyplot as plt
24
25 from jax.api import jit, grad, vmap
26 from jax import random
27 from jax.experimental import optimizers
28 import jax.numpy as np
29 import jax.scipy.stats.norm as norm
30
31
32 # ========= Functions to define the evidence lower bound. =========
33
34 def diag_gaussian_sample(rng, mean, log_std):
35 # Take a single sample from a diagonal multivariate Gaussian.
36 return mean + np.exp(log_std) * random.normal(rng, mean.shape)
37
38 def diag_gaussian_logpdf(x, mean, log_std):
39 # Evaluate a single point on a diagonal multivariate Gaussian.
40 return np.sum(vmap(norm.logpdf)(x, mean, np.exp(log_std)))
41
42 def elbo(logprob, rng, mean, log_std):
43 # Single-sample Monte Carlo estimate of the variational lower bound.
44 sample = diag_gaussian_sample(rng, mean, log_std)
45 return logprob(sample) - diag_gaussian_logpdf(sample, mean, log_std)
46
47 def batch_elbo(logprob, rng, params, num_samples):
48 # Average over a batch of random samples.
49 rngs = random.split(rng, num_samples)
50 vectorized_elbo = vmap(partial(elbo, logprob), in_axes=(0, None, None))
51 return np.mean(vectorized_elbo(rngs, *params))
52
53
54 # ========= Helper function for plotting. =========
55
56 @partial(jit, static_argnums=(0, 1, 2, 4))
57 def mesh_eval(func, x_limits, y_limits, params, num_ticks=101):
58 # Evaluate func on a 2D grid defined by x_limits and y_limits.
59 x = np.linspace(*x_limits, num=num_ticks)
60 y = np.linspace(*y_limits, num=num_ticks)
61 X, Y = np.meshgrid(x, y)
62 xy_vec = np.stack([X.ravel(), Y.ravel()]).T
63 zs = vmap(func, in_axes=(0, None))(xy_vec, params)
64 return X, Y, zs.reshape(X.shape)
65
66
67 # ========= Define an intractable unnormalized density =========
68
69 def funnel_log_density(params):
70 return norm.logpdf(params[0], 0, np.exp(params[1])) + \
71 norm.logpdf(params[1], 0, 1.35)
72
73
74 if __name__ == "__main__":
75 num_samples = 40
76
77 @jit
78 def objective(params, t):
79 rng = random.PRNGKey(t)
80 return -batch_elbo(funnel_log_density, rng, params, num_samples)
81
82 # Set up figure.
83 fig = plt.figure(figsize=(8,8), facecolor='white')
84 ax = fig.add_subplot(111, frameon=False)
85 plt.ion()
86 plt.show(block=False)
87 x_limits = [-2, 2]
88 y_limits = [-4, 2]
89 target_dist = lambda x, _: np.exp(funnel_log_density(x))
90 approx_dist = lambda x, params: np.exp(diag_gaussian_logpdf(x, *params))
91
92 def callback(params, t):
93 print("Iteration {} lower bound {}".format(t, objective(params, t)))
94
95 plt.cla()
96 X, Y, Z = mesh_eval(target_dist, x_limits, y_limits, 1)
97 ax.contour(X, Y, Z, cmap='summer')
98 X, Y, Z = mesh_eval(approx_dist, x_limits, y_limits, params)
99 ax.contour(X, Y, Z, cmap='winter')
100 ax.set_xlim(x_limits)
101 ax.set_ylim(y_limits)
102 ax.set_yticks([])
103 ax.set_xticks([])
104
105 # Plot random samples from variational distribution.
106 # Here we clone the rng used in computing the objective
107 # so that we can show exactly the same samples.
108 rngs = random.split(random.PRNGKey(t), num_samples)
109 samples = vmap(diag_gaussian_sample, in_axes=(0, None, None))(rngs, *params)
110 ax.plot(samples[:, 0], samples[:, 1], 'b.')
111
112 plt.draw()
113 plt.pause(1.0/60.0)
114
115
116 # Set up optimizer.
117 D = 2
118 init_mean = np.zeros(D)
119 init_std = np.zeros(D)
120 init_params = (init_mean, init_std)
121 opt_init, opt_update, get_params = optimizers.momentum(step_size=0.1, mass=0.9)
122 opt_state = opt_init(init_params)
123
124 @jit
125 def update(i, opt_state):
126 params = get_params(opt_state)
127 gradient = grad(objective)(params, i)
128 return opt_update(i, gradient, opt_state)
129
130
131 # Main loop.
132 print("Optimizing variational parameters...")
133 for t in range(100):
134 opt_state = update(t, opt_state)
135 params = get_params(opt_state)
136 callback(params, t)
137 plt.show(block=True)
138
[end of examples/advi.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/advi.py b/examples/advi.py
--- a/examples/advi.py
+++ b/examples/advi.py
@@ -54,7 +54,7 @@
# ========= Helper function for plotting. =========
@partial(jit, static_argnums=(0, 1, 2, 4))
-def mesh_eval(func, x_limits, y_limits, params, num_ticks=101):
+def _mesh_eval(func, x_limits, y_limits, params, num_ticks):
# Evaluate func on a 2D grid defined by x_limits and y_limits.
x = np.linspace(*x_limits, num=num_ticks)
y = np.linspace(*y_limits, num=num_ticks)
@@ -63,6 +63,8 @@
zs = vmap(func, in_axes=(0, None))(xy_vec, params)
return X, Y, zs.reshape(X.shape)
+def mesh_eval(func, x_limits, y_limits, params, num_ticks=101):
+ return _mesh_eval(func, x_limits, y_limits, params, num_ticks)
# ========= Define an intractable unnormalized density =========
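
Editorial note: the golden diff above sidesteps the jit failure by jitting an inner function that always receives every argument explicitly and exposing a thin, un-jitted wrapper that supplies the num_ticks default. A minimal, self-contained sketch of that wrapper pattern follows; the function and argument names are illustrative, not taken from the example.

# Minimal sketch of the wrapper pattern used in the patch above.
from functools import partial
import jax
import jax.numpy as jnp

@partial(jax.jit, static_argnums=(1,))
def _scaled_grid(limit, num_ticks):
    # num_ticks must be static: jnp.linspace needs a concrete length at trace time.
    return jnp.linspace(0.0, limit, num=num_ticks)

def scaled_grid(limit, num_ticks=101):
    # Plain-Python wrapper: callers keep the convenient default, while the
    # jitted body is always called with num_ticks passed explicitly.
    return _scaled_grid(limit, num_ticks)

print(scaled_grid(2.0).shape)  # (101,)
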
| {"golden_diff": "diff --git a/examples/advi.py b/examples/advi.py\n--- a/examples/advi.py\n+++ b/examples/advi.py\n@@ -54,7 +54,7 @@\n # ========= Helper function for plotting. =========\n \n @partial(jit, static_argnums=(0, 1, 2, 4))\n-def mesh_eval(func, x_limits, y_limits, params, num_ticks=101):\n+def _mesh_eval(func, x_limits, y_limits, params, num_ticks):\n # Evaluate func on a 2D grid defined by x_limits and y_limits.\n x = np.linspace(*x_limits, num=num_ticks)\n y = np.linspace(*y_limits, num=num_ticks)\n@@ -63,6 +63,8 @@\n zs = vmap(func, in_axes=(0, None))(xy_vec, params)\n return X, Y, zs.reshape(X.shape)\n \n+def mesh_eval(func, x_limits, y_limits, params, num_ticks=101):\n+ return _mesh_eval(func, x_limits, y_limits, params, num_ticks)\n \n # ========= Define an intractable unnormalized density =========\n", "issue": "advi.py example is broken with jit\nadvi.py example is broken with jit: @partial(jit, static_argnums=(0, 1, 2, 4))\r\nIf jit is removed, then It works.\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Automatic differentiation variational inference in Numpy and JAX.\n\nThis demo fits a Gaussian approximation to an intractable, unnormalized\ndensity, by differentiating through a Monte Carlo estimate of the\nvariational evidence lower bound (ELBO).\"\"\"\n\n\nfrom functools import partial\nimport matplotlib.pyplot as plt\n\nfrom jax.api import jit, grad, vmap\nfrom jax import random\nfrom jax.experimental import optimizers\nimport jax.numpy as np\nimport jax.scipy.stats.norm as norm\n\n\n# ========= Functions to define the evidence lower bound. =========\n\ndef diag_gaussian_sample(rng, mean, log_std):\n # Take a single sample from a diagonal multivariate Gaussian.\n return mean + np.exp(log_std) * random.normal(rng, mean.shape)\n\ndef diag_gaussian_logpdf(x, mean, log_std):\n # Evaluate a single point on a diagonal multivariate Gaussian.\n return np.sum(vmap(norm.logpdf)(x, mean, np.exp(log_std)))\n\ndef elbo(logprob, rng, mean, log_std):\n # Single-sample Monte Carlo estimate of the variational lower bound.\n sample = diag_gaussian_sample(rng, mean, log_std)\n return logprob(sample) - diag_gaussian_logpdf(sample, mean, log_std)\n\ndef batch_elbo(logprob, rng, params, num_samples):\n # Average over a batch of random samples.\n rngs = random.split(rng, num_samples)\n vectorized_elbo = vmap(partial(elbo, logprob), in_axes=(0, None, None))\n return np.mean(vectorized_elbo(rngs, *params))\n\n\n# ========= Helper function for plotting. 
=========\n\n@partial(jit, static_argnums=(0, 1, 2, 4))\ndef mesh_eval(func, x_limits, y_limits, params, num_ticks=101):\n # Evaluate func on a 2D grid defined by x_limits and y_limits.\n x = np.linspace(*x_limits, num=num_ticks)\n y = np.linspace(*y_limits, num=num_ticks)\n X, Y = np.meshgrid(x, y)\n xy_vec = np.stack([X.ravel(), Y.ravel()]).T\n zs = vmap(func, in_axes=(0, None))(xy_vec, params)\n return X, Y, zs.reshape(X.shape)\n\n\n# ========= Define an intractable unnormalized density =========\n\ndef funnel_log_density(params):\n return norm.logpdf(params[0], 0, np.exp(params[1])) + \\\n norm.logpdf(params[1], 0, 1.35)\n\n\nif __name__ == \"__main__\":\n num_samples = 40\n\n @jit\n def objective(params, t):\n rng = random.PRNGKey(t)\n return -batch_elbo(funnel_log_density, rng, params, num_samples)\n\n # Set up figure.\n fig = plt.figure(figsize=(8,8), facecolor='white')\n ax = fig.add_subplot(111, frameon=False)\n plt.ion()\n plt.show(block=False)\n x_limits = [-2, 2]\n y_limits = [-4, 2]\n target_dist = lambda x, _: np.exp(funnel_log_density(x))\n approx_dist = lambda x, params: np.exp(diag_gaussian_logpdf(x, *params))\n\n def callback(params, t):\n print(\"Iteration {} lower bound {}\".format(t, objective(params, t)))\n\n plt.cla()\n X, Y, Z = mesh_eval(target_dist, x_limits, y_limits, 1)\n ax.contour(X, Y, Z, cmap='summer')\n X, Y, Z = mesh_eval(approx_dist, x_limits, y_limits, params)\n ax.contour(X, Y, Z, cmap='winter')\n ax.set_xlim(x_limits)\n ax.set_ylim(y_limits)\n ax.set_yticks([])\n ax.set_xticks([])\n\n # Plot random samples from variational distribution.\n # Here we clone the rng used in computing the objective\n # so that we can show exactly the same samples.\n rngs = random.split(random.PRNGKey(t), num_samples)\n samples = vmap(diag_gaussian_sample, in_axes=(0, None, None))(rngs, *params)\n ax.plot(samples[:, 0], samples[:, 1], 'b.')\n\n plt.draw()\n plt.pause(1.0/60.0)\n\n\n # Set up optimizer.\n D = 2\n init_mean = np.zeros(D)\n init_std = np.zeros(D)\n init_params = (init_mean, init_std)\n opt_init, opt_update, get_params = optimizers.momentum(step_size=0.1, mass=0.9)\n opt_state = opt_init(init_params)\n\n @jit\n def update(i, opt_state):\n params = get_params(opt_state)\n gradient = grad(objective)(params, i)\n return opt_update(i, gradient, opt_state)\n\n\n # Main loop.\n print(\"Optimizing variational parameters...\")\n for t in range(100):\n opt_state = update(t, opt_state)\n params = get_params(opt_state)\n callback(params, t)\n plt.show(block=True)\n", "path": "examples/advi.py"}]} | 2,159 | 250 |
gh_patches_debug_24579 | rasdani/github-patches | git_diff | huggingface__diffusers-6564 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OSError: h94/IP-Adapter does not appear to have a file named models\image_encoder\config.json
### Describe the bug
I'm getting this error:
`OSError: h94/IP-Adapter does not appear to have a file named models\image_encoder\config.json. Checkout 'https://huggingface.co/h94/IP-Adapter/main' for available files.`
When running the IP-Adapter example from here: https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters
### Reproduction
Run the IP-Adapter example from here: https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters
```
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image
noise_scheduler = DDIMScheduler(
num_train_timesteps=1000,
beta_start=0.00085,
beta_end=0.012,
beta_schedule="scaled_linear",
clip_sample=False,
set_alpha_to_one=False,
steps_offset=1
)
pipeline = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
torch_dtype=torch.float16,
scheduler=noise_scheduler,
).to("cuda")
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin")
pipeline.set_ip_adapter_scale(0.7)
image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ai_face2.png")
generator = torch.Generator(device="cpu").manual_seed(33)
image = pipeline(
prompt="A photo of a girl wearing a black dress, holding red roses in hand, upper body, behind is the Eiffel Tower",
ip_adapter_image=image,
negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
num_inference_steps=50, num_images_per_prompt=1, width=512, height=704,
generator=generator,
).images[0]
```
OSError: h94/IP-Adapter does not appear to have a file named models\image_encoder\config.json. Checkout 'https://huggingface.co/h94/IP-Adapter/main' for available files.
### Logs
```shell
OSError: h94/IP-Adapter does not appear to have a file named models\image_encoder\config.json. Checkout 'https://huggingface.co/h94/IP-Adapter/main' for available files.
```
### System Info
Win 11
Diffusers 0.25.0
### Who can help?
@sayakpaul @yiyixuxu @DN6 @patrickvonplaten
</issue>
<code>
[start of src/diffusers/loaders/ip_adapter.py]
1 # Copyright 2023 The HuggingFace Team. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import os
15 from typing import Dict, Union
16
17 import torch
18 from huggingface_hub.utils import validate_hf_hub_args
19 from safetensors import safe_open
20
21 from ..utils import (
22 _get_model_file,
23 is_transformers_available,
24 logging,
25 )
26
27
28 if is_transformers_available():
29 from transformers import (
30 CLIPImageProcessor,
31 CLIPVisionModelWithProjection,
32 )
33
34 from ..models.attention_processor import (
35 IPAdapterAttnProcessor,
36 IPAdapterAttnProcessor2_0,
37 )
38
39 logger = logging.get_logger(__name__)
40
41
42 class IPAdapterMixin:
43 """Mixin for handling IP Adapters."""
44
45 @validate_hf_hub_args
46 def load_ip_adapter(
47 self,
48 pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
49 subfolder: str,
50 weight_name: str,
51 **kwargs,
52 ):
53 """
54 Parameters:
55 pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
56 Can be either:
57
58 - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
59 the Hub.
60 - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
61 with [`ModelMixin.save_pretrained`].
62 - A [torch state
63 dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
64
65 cache_dir (`Union[str, os.PathLike]`, *optional*):
66 Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
67 is not used.
68 force_download (`bool`, *optional*, defaults to `False`):
69 Whether or not to force the (re-)download of the model weights and configuration files, overriding the
70 cached versions if they exist.
71 resume_download (`bool`, *optional*, defaults to `False`):
72 Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
73 incompletely downloaded files are deleted.
74 proxies (`Dict[str, str]`, *optional*):
75 A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
76 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
77 local_files_only (`bool`, *optional*, defaults to `False`):
78 Whether to only load local model weights and configuration files or not. If set to `True`, the model
79 won't be downloaded from the Hub.
80 token (`str` or *bool*, *optional*):
81 The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
82 `diffusers-cli login` (stored in `~/.huggingface`) is used.
83 revision (`str`, *optional*, defaults to `"main"`):
84 The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
85 allowed by Git.
86 subfolder (`str`, *optional*, defaults to `""`):
87 The subfolder location of a model file within a larger model repository on the Hub or locally.
88 """
89
90 # Load the main state dict first.
91 cache_dir = kwargs.pop("cache_dir", None)
92 force_download = kwargs.pop("force_download", False)
93 resume_download = kwargs.pop("resume_download", False)
94 proxies = kwargs.pop("proxies", None)
95 local_files_only = kwargs.pop("local_files_only", None)
96 token = kwargs.pop("token", None)
97 revision = kwargs.pop("revision", None)
98
99 user_agent = {
100 "file_type": "attn_procs_weights",
101 "framework": "pytorch",
102 }
103
104 if not isinstance(pretrained_model_name_or_path_or_dict, dict):
105 model_file = _get_model_file(
106 pretrained_model_name_or_path_or_dict,
107 weights_name=weight_name,
108 cache_dir=cache_dir,
109 force_download=force_download,
110 resume_download=resume_download,
111 proxies=proxies,
112 local_files_only=local_files_only,
113 token=token,
114 revision=revision,
115 subfolder=subfolder,
116 user_agent=user_agent,
117 )
118 if weight_name.endswith(".safetensors"):
119 state_dict = {"image_proj": {}, "ip_adapter": {}}
120 with safe_open(model_file, framework="pt", device="cpu") as f:
121 for key in f.keys():
122 if key.startswith("image_proj."):
123 state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
124 elif key.startswith("ip_adapter."):
125 state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
126 else:
127 state_dict = torch.load(model_file, map_location="cpu")
128 else:
129 state_dict = pretrained_model_name_or_path_or_dict
130
131 keys = list(state_dict.keys())
132 if keys != ["image_proj", "ip_adapter"]:
133 raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")
134
135 # load CLIP image encoder here if it has not been registered to the pipeline yet
136 if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
137 if not isinstance(pretrained_model_name_or_path_or_dict, dict):
138 logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
139 image_encoder = CLIPVisionModelWithProjection.from_pretrained(
140 pretrained_model_name_or_path_or_dict,
141 subfolder=os.path.join(subfolder, "image_encoder"),
142 ).to(self.device, dtype=self.dtype)
143 self.image_encoder = image_encoder
144 self.register_to_config(image_encoder=["transformers", "CLIPVisionModelWithProjection"])
145 else:
146 raise ValueError("`image_encoder` cannot be None when using IP Adapters.")
147
148 # create feature extractor if it has not been registered to the pipeline yet
149 if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
150 self.feature_extractor = CLIPImageProcessor()
151 self.register_to_config(feature_extractor=["transformers", "CLIPImageProcessor"])
152
153 # load ip-adapter into unet
154 unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
155 unet._load_ip_adapter_weights(state_dict)
156
157 def set_ip_adapter_scale(self, scale):
158 unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
159 for attn_processor in unet.attn_processors.values():
160 if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
161 attn_processor.scale = scale
162
163 def unload_ip_adapter(self):
164 """
165 Unloads the IP Adapter weights
166
167 Examples:
168
169 ```python
170 >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
171 >>> pipeline.unload_ip_adapter()
172 >>> ...
173 ```
174 """
175 # remove CLIP image encoder
176 if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
177 self.image_encoder = None
178 self.register_to_config(image_encoder=[None, None])
179
180 # remove feature extractor
181 if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
182 self.feature_extractor = None
183 self.register_to_config(feature_extractor=[None, None])
184
185 # remove hidden encoder
186 self.unet.encoder_hid_proj = None
187 self.config.encoder_hid_dim_type = None
188
189 # restore original Unet attention processors layers
190 self.unet.set_default_attn_processor()
191
[end of src/diffusers/loaders/ip_adapter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/diffusers/loaders/ip_adapter.py b/src/diffusers/loaders/ip_adapter.py
--- a/src/diffusers/loaders/ip_adapter.py
+++ b/src/diffusers/loaders/ip_adapter.py
@@ -11,7 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import os
+from pathlib import Path
from typing import Dict, Union
import torch
@@ -138,7 +138,7 @@
logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
pretrained_model_name_or_path_or_dict,
- subfolder=os.path.join(subfolder, "image_encoder"),
+ subfolder=Path(subfolder, "image_encoder").as_posix(),
).to(self.device, dtype=self.dtype)
self.image_encoder = image_encoder
self.register_to_config(image_encoder=["transformers", "CLIPVisionModelWithProjection"])
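
Editorial note: the one-line fix above replaces os.path.join with Path(...).as_posix() because the Hub expects forward-slash subfolder paths, while os.path.join inserts a backslash on Windows -- the separator visible in the reported OSError. A small illustrative comparison (Windows behaviour shown in the comments; on POSIX systems both expressions return the same string):

# Illustrative comparison only; the actual fix is the one-line change above.
import os
from pathlib import Path

subfolder = "models"

joined = os.path.join(subfolder, "image_encoder")    # 'models\\image_encoder' on Windows
posix = Path(subfolder, "image_encoder").as_posix()  # 'models/image_encoder' on every OS

print(joined)
print(posix)
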
| {"golden_diff": "diff --git a/src/diffusers/loaders/ip_adapter.py b/src/diffusers/loaders/ip_adapter.py\n--- a/src/diffusers/loaders/ip_adapter.py\n+++ b/src/diffusers/loaders/ip_adapter.py\n@@ -11,7 +11,7 @@\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n-import os\n+from pathlib import Path\n from typing import Dict, Union\n \n import torch\n@@ -138,7 +138,7 @@\n logger.info(f\"loading image_encoder from {pretrained_model_name_or_path_or_dict}\")\n image_encoder = CLIPVisionModelWithProjection.from_pretrained(\n pretrained_model_name_or_path_or_dict,\n- subfolder=os.path.join(subfolder, \"image_encoder\"),\n+ subfolder=Path(subfolder, \"image_encoder\").as_posix(),\n ).to(self.device, dtype=self.dtype)\n self.image_encoder = image_encoder\n self.register_to_config(image_encoder=[\"transformers\", \"CLIPVisionModelWithProjection\"])\n", "issue": "OSError: h94/IP-Adapter does not appear to have a file named models\\image_encoder\\config.json\n### Describe the bug\r\n\r\nI'm getting this error:\r\n\r\n`OSError: h94/IP-Adapter does not appear to have a file named models\\image_encoder\\config.json. Checkout 'https://huggingface.co/h94/IP-Adapter/main' for available files.`\r\n\r\nWhen running the IP-Adapter example from here: https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters\r\n\r\n### Reproduction\r\n\r\nRun the IP-Adapter example from here: https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters\r\n\r\n```\r\nimport torch\r\nfrom diffusers import StableDiffusionPipeline, DDIMScheduler\r\nfrom diffusers.utils import load_image\r\n\r\nnoise_scheduler = DDIMScheduler(\r\n num_train_timesteps=1000,\r\n beta_start=0.00085,\r\n beta_end=0.012,\r\n beta_schedule=\"scaled_linear\",\r\n clip_sample=False,\r\n set_alpha_to_one=False,\r\n steps_offset=1\r\n)\r\n\r\npipeline = StableDiffusionPipeline.from_pretrained(\r\n \"runwayml/stable-diffusion-v1-5\",\r\n torch_dtype=torch.float16,\r\n scheduler=noise_scheduler,\r\n).to(\"cuda\")\r\n\r\npipeline.load_ip_adapter(\"h94/IP-Adapter\", subfolder=\"models\", weight_name=\"ip-adapter-full-face_sd15.bin\")\r\n\r\npipeline.set_ip_adapter_scale(0.7)\r\n\r\nimage = load_image(\"https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/ai_face2.png\")\r\n\r\ngenerator = torch.Generator(device=\"cpu\").manual_seed(33)\r\n\r\nimage = pipeline(\r\n prompt=\"A photo of a girl wearing a black dress, holding red roses in hand, upper body, behind is the Eiffel Tower\",\r\n ip_adapter_image=image,\r\n negative_prompt=\"monochrome, lowres, bad anatomy, worst quality, low quality\", \r\n num_inference_steps=50, num_images_per_prompt=1, width=512, height=704,\r\n generator=generator,\r\n).images[0]\r\n```\r\n\r\nOSError: h94/IP-Adapter does not appear to have a file named models\\image_encoder\\config.json. Checkout 'https://huggingface.co/h94/IP-Adapter/main' for available files.\r\n\r\n### Logs\r\n\r\n```shell\r\nOSError: h94/IP-Adapter does not appear to have a file named models\\image_encoder\\config.json. Checkout 'https://huggingface.co/h94/IP-Adapter/main' for available files.\r\n```\r\n\r\n\r\n### System Info\r\n\r\nWin 11\r\nDiffusers 0.25.0\r\n\r\n### Who can help?\r\n\r\n@sayakpaul @yiyixuxu @DN6 @patrickvonplaten\n", "before_files": [{"content": "# Copyright 2023 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Dict, Union\n\nimport torch\nfrom huggingface_hub.utils import validate_hf_hub_args\nfrom safetensors import safe_open\n\nfrom ..utils import (\n _get_model_file,\n is_transformers_available,\n logging,\n)\n\n\nif is_transformers_available():\n from transformers import (\n CLIPImageProcessor,\n CLIPVisionModelWithProjection,\n )\n\n from ..models.attention_processor import (\n IPAdapterAttnProcessor,\n IPAdapterAttnProcessor2_0,\n )\n\nlogger = logging.get_logger(__name__)\n\n\nclass IPAdapterMixin:\n \"\"\"Mixin for handling IP Adapters.\"\"\"\n\n @validate_hf_hub_args\n def load_ip_adapter(\n self,\n pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],\n subfolder: str,\n weight_name: str,\n **kwargs,\n ):\n \"\"\"\n Parameters:\n pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):\n Can be either:\n\n - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on\n the Hub.\n - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved\n with [`ModelMixin.save_pretrained`].\n - A [torch state\n dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n The subfolder location of a model file within a larger model repository on the Hub or locally.\n \"\"\"\n\n # Load the main state dict first.\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", None)\n token = kwargs.pop(\"token\", None)\n revision = kwargs.pop(\"revision\", None)\n\n user_agent = {\n \"file_type\": \"attn_procs_weights\",\n \"framework\": \"pytorch\",\n }\n\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n model_file = _get_model_file(\n pretrained_model_name_or_path_or_dict,\n weights_name=weight_name,\n cache_dir=cache_dir,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n token=token,\n revision=revision,\n subfolder=subfolder,\n user_agent=user_agent,\n )\n if weight_name.endswith(\".safetensors\"):\n state_dict = {\"image_proj\": {}, \"ip_adapter\": {}}\n with safe_open(model_file, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n if key.startswith(\"image_proj.\"):\n state_dict[\"image_proj\"][key.replace(\"image_proj.\", \"\")] = f.get_tensor(key)\n elif key.startswith(\"ip_adapter.\"):\n state_dict[\"ip_adapter\"][key.replace(\"ip_adapter.\", \"\")] = f.get_tensor(key)\n else:\n state_dict = torch.load(model_file, map_location=\"cpu\")\n else:\n state_dict = pretrained_model_name_or_path_or_dict\n\n keys = list(state_dict.keys())\n if keys != [\"image_proj\", \"ip_adapter\"]:\n raise ValueError(\"Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.\")\n\n # load CLIP image encoder here if it has not been registered to the pipeline yet\n if hasattr(self, \"image_encoder\") and getattr(self, \"image_encoder\", None) is None:\n if not isinstance(pretrained_model_name_or_path_or_dict, dict):\n logger.info(f\"loading image_encoder from {pretrained_model_name_or_path_or_dict}\")\n image_encoder = CLIPVisionModelWithProjection.from_pretrained(\n pretrained_model_name_or_path_or_dict,\n subfolder=os.path.join(subfolder, \"image_encoder\"),\n ).to(self.device, dtype=self.dtype)\n self.image_encoder = image_encoder\n self.register_to_config(image_encoder=[\"transformers\", \"CLIPVisionModelWithProjection\"])\n else:\n raise ValueError(\"`image_encoder` cannot be None when using IP Adapters.\")\n\n # create feature extractor if it has not been registered to the pipeline yet\n if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is None:\n self.feature_extractor = CLIPImageProcessor()\n self.register_to_config(feature_extractor=[\"transformers\", \"CLIPImageProcessor\"])\n\n # load ip-adapter into unet\n unet = getattr(self, self.unet_name) if not hasattr(self, \"unet\") else self.unet\n unet._load_ip_adapter_weights(state_dict)\n\n def set_ip_adapter_scale(self, scale):\n unet = getattr(self, self.unet_name) if not hasattr(self, \"unet\") else self.unet\n for attn_processor in unet.attn_processors.values():\n if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):\n attn_processor.scale = scale\n\n def unload_ip_adapter(self):\n \"\"\"\n Unloads the IP Adapter weights\n\n Examples:\n\n ```python\n >>> # Assuming `pipeline` is already loaded 
with the IP Adapter weights.\n >>> pipeline.unload_ip_adapter()\n >>> ...\n ```\n \"\"\"\n # remove CLIP image encoder\n if hasattr(self, \"image_encoder\") and getattr(self, \"image_encoder\", None) is not None:\n self.image_encoder = None\n self.register_to_config(image_encoder=[None, None])\n\n # remove feature extractor\n if hasattr(self, \"feature_extractor\") and getattr(self, \"feature_extractor\", None) is not None:\n self.feature_extractor = None\n self.register_to_config(feature_extractor=[None, None])\n\n # remove hidden encoder\n self.unet.encoder_hid_proj = None\n self.config.encoder_hid_dim_type = None\n\n # restore original Unet attention processors layers\n self.unet.set_default_attn_processor()\n", "path": "src/diffusers/loaders/ip_adapter.py"}]} | 3,441 | 235 |
gh_patches_debug_15104 | rasdani/github-patches | git_diff | dotkom__onlineweb4-606 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Order of events in event archive is non-logical
As it is now:
28.11.13
04.04.14
16.01.14
23.11.13
27.02.14
and so on...
</issue>
<code>
[start of apps/events/views.py]
1 #-*- coding: utf-8 -*-
2
3 import datetime
4
5 from django.utils import timezone
6
7 from django.conf import settings
8 from django.contrib import messages
9 from django.contrib.auth.decorators import login_required, user_passes_test
10 from django.core.urlresolvers import reverse
11 from django.http import HttpResponseRedirect
12 from django.shortcuts import render, get_object_or_404, redirect
13 from django.utils.translation import ugettext as _
14
15 import watson
16
17 from apps.events.forms import CaptchaForm
18 from apps.events.models import Event, AttendanceEvent, Attendee
19 from apps.events.pdf_generator import EventPDF
20
21
22 def index(request):
23 return render(request, 'events/index.html', {})
24
25 def details(request, event_id, event_slug):
26 event = get_object_or_404(Event, pk=event_id)
27
28 is_attendance_event = False
29 user_anonymous = True
30 user_attending = False
31 place_on_wait_list = 0
32 will_be_on_wait_list = False
33 rules = []
34 user_status = False
35
36 try:
37 attendance_event = AttendanceEvent.objects.get(pk=event_id)
38 is_attendance_event = True
39 form = CaptchaForm(user=request.user)
40
41 if attendance_event.rule_bundles:
42 for rule_bundle in attendance_event.rule_bundles.all():
43 rules.append(rule_bundle.get_rule_strings)
44
45 if request.user.is_authenticated():
46 user_anonymous = False
47 if attendance_event.is_attendee(request.user):
48 user_attending = True
49
50
51 will_be_on_wait_list = attendance_event.will_i_be_on_wait_list
52
53 user_status = event.is_eligible_for_signup(request.user)
54
55 # Check if this user is on the waitlist
56 place_on_wait_list = event.what_place_is_user_on_wait_list(request.user)
57
58 except AttendanceEvent.DoesNotExist:
59 pass
60
61 if is_attendance_event:
62 context = {
63 'event': event,
64 'attendance_event': attendance_event,
65 'user_anonymous': user_anonymous,
66 'user_attending': user_attending,
67 'will_be_on_wait_list': will_be_on_wait_list,
68 'rules': rules,
69 'user_status': user_status,
70 'place_on_wait_list': int(place_on_wait_list),
71 #'position_in_wait_list': position_in_wait_list,
72 'captcha_form': form,
73 }
74
75 return render(request, 'events/details.html', context)
76 else:
77 return render(request, 'events/details.html', {'event': event})
78
79
80 def get_attendee(attendee_id):
81 return get_object_or_404(Attendee, pk=attendee_id)
82
83 @login_required
84 def attendEvent(request, event_id):
85
86 event = get_object_or_404(Event, pk=event_id)
87
88 if not request.POST:
89 messages.error(request, _(u'Vennligst fyll ut skjemaet.'))
90 return redirect(event)
91 form = CaptchaForm(request.POST, user=request.user)
92
93 if not form.is_valid():
94 if not 'mark_rules' in request.POST and not request.user.mark_rules:
95 error_message = u'Du må godta prikkreglene for å melde deg på.'
96 else:
97 error_message = u'Du klarte ikke captcha-en. Er du en bot?'
98 messages.error(request, _(error_message))
99 return redirect(event)
100
101 # Check if the user is eligible to attend this event.
102 # If not, an error message will be present in the returned dict
103 attendance_event = event.attendance_event
104
105 response = event.is_eligible_for_signup(request.user);
106
107 if response['status']:
108 # First time accepting mark rules
109 if 'mark_rules' in form.cleaned_data:
110 request.user.mark_rules = True
111 request.user.save()
112 Attendee(event=attendance_event, user=request.user).save()
113 messages.success(request, _(u"Du er nå påmeldt på arrangementet!"))
114 return redirect(event)
115 else:
116 messages.error(request, response['message'])
117 return redirect(event)
118
119 @login_required
120 def unattendEvent(request, event_id):
121
122 event = get_object_or_404(Event, pk=event_id)
123 attendance_event = event.attendance_event
124 Attendee.objects.get(event=attendance_event, user=request.user).delete()
125
126 messages.success(request, _(u"Du ble meldt av arrangementet."))
127 return redirect(event)
128
129 def search_events(request):
130 query = request.GET.get('query')
131 filters = {
132 'future' : request.GET.get('future'),
133 'myevents' : request.GET.get('myevents')
134 }
135 events = _search_indexed(request, query, filters)
136
137 return render(request, 'events/search.html', {'events': events})
138
139
140 def _search_indexed(request, query, filters):
141 results = []
142 kwargs = {}
143
144 if filters['future'] == 'true':
145 kwargs['event_start__gte'] = timezone.now()
146
147 if filters['myevents'] == 'true':
148 kwargs['attendance_event__attendees'] = request.user
149
150 if query:
151 for result in watson.search(query, models=(
152 Event.objects.filter(**kwargs).prefetch_related(
153 'attendance_event', 'attendance_event__attendees'),)):
154 results.append(result.object)
155 return results[:10]
156
157 return Event.objects.filter(**kwargs).prefetch_related(
158 'attendance_event', 'attendance_event__attendees')
159
160
161 @login_required()
162 @user_passes_test(lambda u: u.groups.filter(name='Komiteer').count() == 1)
163 def generate_pdf(request, event_id):
164 event = get_object_or_404(Event, pk=event_id)
165 return EventPDF(event).render_pdf()
[end of apps/events/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/apps/events/views.py b/apps/events/views.py
--- a/apps/events/views.py
+++ b/apps/events/views.py
@@ -147,15 +147,15 @@
if filters['myevents'] == 'true':
kwargs['attendance_event__attendees'] = request.user
+ events = Event.objects.filter(**kwargs).order_by('event_start').prefetch_related(
+ 'attendance_event', 'attendance_event__attendees')
+
if query:
- for result in watson.search(query, models=(
- Event.objects.filter(**kwargs).prefetch_related(
- 'attendance_event', 'attendance_event__attendees'),)):
+ for result in watson.search(query, models=(events,)):
results.append(result.object)
return results[:10]
- return Event.objects.filter(**kwargs).prefetch_related(
- 'attendance_event', 'attendance_event__attendees')
+ return events
@login_required()
| {"golden_diff": "diff --git a/apps/events/views.py b/apps/events/views.py\n--- a/apps/events/views.py\n+++ b/apps/events/views.py\n@@ -147,15 +147,15 @@\n if filters['myevents'] == 'true':\n kwargs['attendance_event__attendees'] = request.user\n \n+ events = Event.objects.filter(**kwargs).order_by('event_start').prefetch_related(\n+ 'attendance_event', 'attendance_event__attendees')\n+\n if query:\n- for result in watson.search(query, models=(\n- Event.objects.filter(**kwargs).prefetch_related(\n- 'attendance_event', 'attendance_event__attendees'),)):\n+ for result in watson.search(query, models=(events,)):\n results.append(result.object)\n return results[:10]\n \n- return Event.objects.filter(**kwargs).prefetch_related(\n- 'attendance_event', 'attendance_event__attendees')\n+ return events\n \n \n @login_required()\n", "issue": "Order of events in event archive is non-logical\nAs it is now:\n\n28.11.13\n04.04.14\n16.01.14\n23.11.13\n27.02.14\n\nand so on...\n\n", "before_files": [{"content": "#-*- coding: utf-8 -*-\n\nimport datetime\n\nfrom django.utils import timezone\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils.translation import ugettext as _\n\nimport watson\n\nfrom apps.events.forms import CaptchaForm\nfrom apps.events.models import Event, AttendanceEvent, Attendee\nfrom apps.events.pdf_generator import EventPDF\n\n\ndef index(request):\n return render(request, 'events/index.html', {})\n\ndef details(request, event_id, event_slug):\n event = get_object_or_404(Event, pk=event_id)\n\n is_attendance_event = False\n user_anonymous = True\n user_attending = False\n place_on_wait_list = 0\n will_be_on_wait_list = False\n rules = []\n user_status = False\n\n try:\n attendance_event = AttendanceEvent.objects.get(pk=event_id)\n is_attendance_event = True\n form = CaptchaForm(user=request.user)\n\n if attendance_event.rule_bundles:\n for rule_bundle in attendance_event.rule_bundles.all():\n rules.append(rule_bundle.get_rule_strings)\n\n if request.user.is_authenticated():\n user_anonymous = False\n if attendance_event.is_attendee(request.user):\n user_attending = True\n\n \n will_be_on_wait_list = attendance_event.will_i_be_on_wait_list\n\n user_status = event.is_eligible_for_signup(request.user)\n\n # Check if this user is on the waitlist\n place_on_wait_list = event.what_place_is_user_on_wait_list(request.user)\n\n except AttendanceEvent.DoesNotExist:\n pass\n\n if is_attendance_event:\n context = {\n 'event': event,\n 'attendance_event': attendance_event,\n 'user_anonymous': user_anonymous,\n 'user_attending': user_attending,\n 'will_be_on_wait_list': will_be_on_wait_list,\n 'rules': rules,\n 'user_status': user_status,\n 'place_on_wait_list': int(place_on_wait_list),\n #'position_in_wait_list': position_in_wait_list,\n 'captcha_form': form,\n }\n \n return render(request, 'events/details.html', context)\n else:\n return render(request, 'events/details.html', {'event': event})\n\n\ndef get_attendee(attendee_id):\n return get_object_or_404(Attendee, pk=attendee_id)\n\n@login_required\ndef attendEvent(request, event_id):\n \n event = get_object_or_404(Event, pk=event_id)\n\n if not request.POST:\n messages.error(request, _(u'Vennligst fyll ut skjemaet.'))\n return redirect(event)\n form = 
CaptchaForm(request.POST, user=request.user)\n\n if not form.is_valid():\n if not 'mark_rules' in request.POST and not request.user.mark_rules:\n error_message = u'Du m\u00e5 godta prikkreglene for \u00e5 melde deg p\u00e5.'\n else:\n error_message = u'Du klarte ikke captcha-en. Er du en bot?'\n messages.error(request, _(error_message))\n return redirect(event)\n\n # Check if the user is eligible to attend this event.\n # If not, an error message will be present in the returned dict\n attendance_event = event.attendance_event\n\n response = event.is_eligible_for_signup(request.user);\n\n if response['status']: \n # First time accepting mark rules\n if 'mark_rules' in form.cleaned_data:\n request.user.mark_rules = True\n request.user.save()\n Attendee(event=attendance_event, user=request.user).save()\n messages.success(request, _(u\"Du er n\u00e5 p\u00e5meldt p\u00e5 arrangementet!\"))\n return redirect(event)\n else:\n messages.error(request, response['message'])\n return redirect(event)\n\n@login_required\ndef unattendEvent(request, event_id):\n\n event = get_object_or_404(Event, pk=event_id)\n attendance_event = event.attendance_event\n Attendee.objects.get(event=attendance_event, user=request.user).delete()\n\n messages.success(request, _(u\"Du ble meldt av arrangementet.\"))\n return redirect(event)\n\ndef search_events(request):\n query = request.GET.get('query')\n filters = {\n 'future' : request.GET.get('future'),\n 'myevents' : request.GET.get('myevents')\n }\n events = _search_indexed(request, query, filters)\n\n return render(request, 'events/search.html', {'events': events})\n\n\ndef _search_indexed(request, query, filters):\n results = []\n kwargs = {}\n\n if filters['future'] == 'true':\n kwargs['event_start__gte'] = timezone.now()\n\n if filters['myevents'] == 'true':\n kwargs['attendance_event__attendees'] = request.user\n\n if query:\n for result in watson.search(query, models=(\n Event.objects.filter(**kwargs).prefetch_related(\n 'attendance_event', 'attendance_event__attendees'),)):\n results.append(result.object)\n return results[:10]\n\n return Event.objects.filter(**kwargs).prefetch_related(\n 'attendance_event', 'attendance_event__attendees')\n\n\n@login_required()\n@user_passes_test(lambda u: u.groups.filter(name='Komiteer').count() == 1)\ndef generate_pdf(request, event_id):\n event = get_object_or_404(Event, pk=event_id)\n return EventPDF(event).render_pdf()", "path": "apps/events/views.py"}]} | 2,211 | 209 |
gh_patches_debug_38138 | rasdani/github-patches | git_diff | aws__aws-cli-483 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Option to associate public IP address in ec2 run-instance
There doesn't seem to be any way to associate a public IP address without also adding a network interface with the `--network-interfaces` parameter. Is it possible for this to be a top-level parameter?
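For illustration, a minimal sketch of how such a top-level flag could hook into the existing run-instances customizations (the class name and wiring below are assumptions, mirroring how the secondary-IP options in `awscli/customizations/ec2runinstances.py` populate the network-interface structure via its module-level `_build_network_interfaces` helper):

```python
from awscli.arguments import CustomArgument

class AssociatePublicIpAddressArgument(CustomArgument):
    """Hypothetical --associate-public-ip-address flag for run-instances."""

    def add_to_params(self, parameters, value):
        # Write the flag into the implicit first network interface,
        # the same structure the secondary-IP options populate.
        # Assumes this lives in ec2runinstances.py next to
        # _build_network_interfaces.
        if value is True:
            _build_network_interfaces(parameters,
                                      'AssociatePublicIpAddress',
                                      value)
```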
</issue>
<code>
[start of awscli/customizations/ec2runinstances.py]
1 # Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6 #
7 # http://aws.amazon.com/apache2.0/
8 #
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 """
14 This customization adds two new parameters to the ``ec2 run-instance``
15 command. The first, ``--secondary-private-ip-addresses`` allows a list
16 of IP addresses within the specified subnet to be associated with the
17 new instance. The second, ``--secondary-ip-address-count`` allows you
18 to specify how many additional IP addresses you want but the actual
19 address will be assigned for you.
20
21 This functionality (and much more) is also available using the
22 ``--network-interfaces`` complex argument. This just makes two of
23 the most commonly used features available more easily.
24 """
25 from awscli.arguments import CustomArgument
26
27
28 # --secondary-private-ip-address
29 SECONDARY_PRIVATE_IP_ADDRESSES_DOCS = (
30 '[EC2-VPC] A secondary private IP address for the network interface '
31 'or instance. You can specify this multiple times to assign multiple '
32 'secondary IP addresses. If you want additional private IP addresses '
33 'but do not need a specific address, use the '
34 '--secondary-private-ip-address-count option.')
35
36 # --secondary-private-ip-address-count
37 SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = (
38 '[EC2-VPC] The number of secondary IP addresses to assign to '
39 'the network interface or instance.')
40
41
42 def _add_params(argument_table, operation, **kwargs):
43 arg = SecondaryPrivateIpAddressesArgument(
44 name='secondary-private-ip-addresses',
45 help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS)
46 argument_table['secondary-private-ip-addresses'] = arg
47 arg = SecondaryPrivateIpAddressCountArgument(
48 name='secondary-private-ip-address-count',
49 help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)
50 argument_table['secondary-private-ip-address-count'] = arg
51
52
53 def _check_args(parsed_args, **kwargs):
54 # This function checks the parsed args. If the user specified
55 # the --network-interfaces option with any of the scalar options we
56 # raise an error.
57 arg_dict = vars(parsed_args)
58 if arg_dict['network_interfaces']:
59 for key in ('secondary_private_ip_addresses',
60 'secondary_private_ip_address_count'):
61 if arg_dict[key]:
62 msg = ('Mixing the --network-interfaces option '
63 'with the simple, scalar options is '
64 'not supported.')
65 raise ValueError(msg)
66
67 EVENTS = [
68 ('building-argument-table.ec2.run-instances', _add_params),
69 ('operation-args-parsed.ec2.run-instances', _check_args),
70 ]
71
72
73 def register_runinstances(event_handler):
74 # Register all of the events for customizing BundleInstance
75 for event, handler in EVENTS:
76 event_handler.register(event, handler)
77
78
79 def _build_network_interfaces(params, key, value):
80 # Build up the NetworkInterfaces data structure
81 if 'network_interfaces' not in params:
82 params['network_interfaces'] = [{'DeviceIndex': 0}]
83
84 if key == 'PrivateIpAddresses':
85 if 'PrivateIpAddresses' not in params['network_interfaces'][0]:
86 params['network_interfaces'][0]['PrivateIpAddresses'] = value
87 else:
88 params['network_interfaces'][0][key] = value
89
90
91 class SecondaryPrivateIpAddressesArgument(CustomArgument):
92
93 def add_to_parser(self, parser, cli_name=None):
94 parser.add_argument(self.cli_name, dest=self.py_name,
95 default=self._default, nargs='*')
96
97 def add_to_params(self, parameters, value):
98 if value:
99 value = [{'PrivateIpAddress': v, 'Primary': False} for
100 v in value]
101 _build_network_interfaces(parameters,
102 'PrivateIpAddresses',
103 value)
104
105
106 class SecondaryPrivateIpAddressCountArgument(CustomArgument):
107
108 def add_to_parser(self, parser, cli_name=None):
109 parser.add_argument(self.cli_name, dest=self.py_name,
110 default=self._default, type=int)
111
112 def add_to_params(self, parameters, value):
113 if value:
114 _build_network_interfaces(parameters,
115 'SecondaryPrivateIpAddressCount',
116 value)
117
[end of awscli/customizations/ec2runinstances.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/awscli/customizations/ec2runinstances.py b/awscli/customizations/ec2runinstances.py
--- a/awscli/customizations/ec2runinstances.py
+++ b/awscli/customizations/ec2runinstances.py
@@ -38,6 +38,12 @@
'[EC2-VPC] The number of secondary IP addresses to assign to '
'the network interface or instance.')
+# --associate-public-ip-address
+ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = (
+ '[EC2-VPC] If specified a public IP address will be assigned '
+ 'to the new instance in a VPC.')
+
+
def _add_params(argument_table, operation, **kwargs):
arg = SecondaryPrivateIpAddressesArgument(
@@ -48,6 +54,16 @@
name='secondary-private-ip-address-count',
help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)
argument_table['secondary-private-ip-address-count'] = arg
+ arg = AssociatePublicIpAddressArgument(
+ name='associate-public-ip-address',
+ help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,
+ action='store_true', group_name='associate_public_ip')
+ argument_table['associate-public-ip-address'] = arg
+ arg = NoAssociatePublicIpAddressArgument(
+ name='no-associate-public-ip-address',
+ help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,
+ action='store_false', group_name='associate_public_ip')
+ argument_table['no-associate-public-ip-address'] = arg
def _check_args(parsed_args, **kwargs):
@@ -57,7 +73,8 @@
arg_dict = vars(parsed_args)
if arg_dict['network_interfaces']:
for key in ('secondary_private_ip_addresses',
- 'secondary_private_ip_address_count'):
+ 'secondary_private_ip_address_count',
+ 'associate_public_ip_address'):
if arg_dict[key]:
msg = ('Mixing the --network-interfaces option '
'with the simple, scalar options is '
@@ -114,3 +131,21 @@
_build_network_interfaces(parameters,
'SecondaryPrivateIpAddressCount',
value)
+
+
+class AssociatePublicIpAddressArgument(CustomArgument):
+
+ def add_to_params(self, parameters, value):
+ if value is True:
+ _build_network_interfaces(parameters,
+ 'AssociatePublicIpAddress',
+ value)
+
+
+class NoAssociatePublicIpAddressArgument(CustomArgument):
+
+ def add_to_params(self, parameters, value):
+ if value is False:
+ _build_network_interfaces(parameters,
+ 'AssociatePublicIpAddress',
+ value)
| {"golden_diff": "diff --git a/awscli/customizations/ec2runinstances.py b/awscli/customizations/ec2runinstances.py\n--- a/awscli/customizations/ec2runinstances.py\n+++ b/awscli/customizations/ec2runinstances.py\n@@ -38,6 +38,12 @@\n '[EC2-VPC] The number of secondary IP addresses to assign to '\n 'the network interface or instance.')\n \n+# --associate-public-ip-address\n+ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS = (\n+ '[EC2-VPC] If specified a public IP address will be assigned '\n+ 'to the new instance in a VPC.')\n+\n+\n \n def _add_params(argument_table, operation, **kwargs):\n arg = SecondaryPrivateIpAddressesArgument(\n@@ -48,6 +54,16 @@\n name='secondary-private-ip-address-count',\n help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)\n argument_table['secondary-private-ip-address-count'] = arg\n+ arg = AssociatePublicIpAddressArgument(\n+ name='associate-public-ip-address',\n+ help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n+ action='store_true', group_name='associate_public_ip')\n+ argument_table['associate-public-ip-address'] = arg\n+ arg = NoAssociatePublicIpAddressArgument(\n+ name='no-associate-public-ip-address',\n+ help_text=ASSOCIATE_PUBLIC_IP_ADDRESS_DOCS,\n+ action='store_false', group_name='associate_public_ip')\n+ argument_table['no-associate-public-ip-address'] = arg\n \n \n def _check_args(parsed_args, **kwargs):\n@@ -57,7 +73,8 @@\n arg_dict = vars(parsed_args)\n if arg_dict['network_interfaces']:\n for key in ('secondary_private_ip_addresses',\n- 'secondary_private_ip_address_count'):\n+ 'secondary_private_ip_address_count',\n+ 'associate_public_ip_address'):\n if arg_dict[key]:\n msg = ('Mixing the --network-interfaces option '\n 'with the simple, scalar options is '\n@@ -114,3 +131,21 @@\n _build_network_interfaces(parameters,\n 'SecondaryPrivateIpAddressCount',\n value)\n+\n+\n+class AssociatePublicIpAddressArgument(CustomArgument):\n+\n+ def add_to_params(self, parameters, value):\n+ if value is True:\n+ _build_network_interfaces(parameters,\n+ 'AssociatePublicIpAddress',\n+ value)\n+\n+\n+class NoAssociatePublicIpAddressArgument(CustomArgument):\n+\n+ def add_to_params(self, parameters, value):\n+ if value is False:\n+ _build_network_interfaces(parameters,\n+ 'AssociatePublicIpAddress',\n+ value)\n", "issue": "Option to associate public ip address in ec2 run-instance\nThere doesn't seem to be any way to associate a public ip address without also adding a network interface with the --network-interfaces parameter. Is it possible for this to be a top level parameter?\n\n", "before_files": [{"content": "# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\"\"\"\nThis customization adds two new parameters to the ``ec2 run-instance``\ncommand. The first, ``--secondary-private-ip-addresses`` allows a list\nof IP addresses within the specified subnet to be associated with the\nnew instance. 
The second, ``--secondary-ip-address-count`` allows you\nto specify how many additional IP addresses you want but the actual\naddress will be assigned for you.\n\nThis functionality (and much more) is also available using the\n``--network-interfaces`` complex argument. This just makes two of\nthe most commonly used features available more easily.\n\"\"\"\nfrom awscli.arguments import CustomArgument\n\n\n# --secondary-private-ip-address\nSECONDARY_PRIVATE_IP_ADDRESSES_DOCS = (\n '[EC2-VPC] A secondary private IP address for the network interface '\n 'or instance. You can specify this multiple times to assign multiple '\n 'secondary IP addresses. If you want additional private IP addresses '\n 'but do not need a specific address, use the '\n '--secondary-private-ip-address-count option.')\n\n# --secondary-private-ip-address-count\nSECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS = (\n '[EC2-VPC] The number of secondary IP addresses to assign to '\n 'the network interface or instance.')\n\n\ndef _add_params(argument_table, operation, **kwargs):\n arg = SecondaryPrivateIpAddressesArgument(\n name='secondary-private-ip-addresses',\n help_text=SECONDARY_PRIVATE_IP_ADDRESSES_DOCS)\n argument_table['secondary-private-ip-addresses'] = arg\n arg = SecondaryPrivateIpAddressCountArgument(\n name='secondary-private-ip-address-count',\n help_text=SECONDARY_PRIVATE_IP_ADDRESS_COUNT_DOCS)\n argument_table['secondary-private-ip-address-count'] = arg\n\n\ndef _check_args(parsed_args, **kwargs):\n # This function checks the parsed args. If the user specified\n # the --network-interfaces option with any of the scalar options we\n # raise an error.\n arg_dict = vars(parsed_args)\n if arg_dict['network_interfaces']:\n for key in ('secondary_private_ip_addresses',\n 'secondary_private_ip_address_count'):\n if arg_dict[key]:\n msg = ('Mixing the --network-interfaces option '\n 'with the simple, scalar options is '\n 'not supported.')\n raise ValueError(msg)\n\nEVENTS = [\n ('building-argument-table.ec2.run-instances', _add_params),\n ('operation-args-parsed.ec2.run-instances', _check_args),\n ]\n\n\ndef register_runinstances(event_handler):\n # Register all of the events for customizing BundleInstance\n for event, handler in EVENTS:\n event_handler.register(event, handler)\n\n\ndef _build_network_interfaces(params, key, value):\n # Build up the NetworkInterfaces data structure\n if 'network_interfaces' not in params:\n params['network_interfaces'] = [{'DeviceIndex': 0}]\n\n if key == 'PrivateIpAddresses':\n if 'PrivateIpAddresses' not in params['network_interfaces'][0]:\n params['network_interfaces'][0]['PrivateIpAddresses'] = value\n else:\n params['network_interfaces'][0][key] = value\n\n\nclass SecondaryPrivateIpAddressesArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, nargs='*')\n\n def add_to_params(self, parameters, value):\n if value:\n value = [{'PrivateIpAddress': v, 'Primary': False} for\n v in value]\n _build_network_interfaces(parameters,\n 'PrivateIpAddresses',\n value)\n\n\nclass SecondaryPrivateIpAddressCountArgument(CustomArgument):\n\n def add_to_parser(self, parser, cli_name=None):\n parser.add_argument(self.cli_name, dest=self.py_name,\n default=self._default, type=int)\n\n def add_to_params(self, parameters, value):\n if value:\n _build_network_interfaces(parameters,\n 'SecondaryPrivateIpAddressCount',\n value)\n", "path": "awscli/customizations/ec2runinstances.py"}]} | 1,827 | 569 |
gh_patches_debug_41300 | rasdani/github-patches | git_diff | pytorch__rl-504 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[Feature Request] Add info dict key-spec pairs to `env.observation_spec` if they're provided to a gym environment through `set_info_dict_reader`
## Motivation
gym's environments can pass some more info through a dictionary `info`:
`obs, reward, done, info = env.step(action)`.
This dictionary can contain additional information about the state of the env: time spent doing the task, whether the task is solved, etc.
We wrap the obs, reward and done in the output `TensorDict`. We can also place the info entries there if they're numerical values (see #234):
```python
>>> from torchrl.envs.libs.gym import GymWrapper
>>> from torchrl.envs import default_info_dict_reader
>>> reader = default_info_dict_reader(["my_info_key"])
>>> # assuming "some_env-v0" returns a dict with a key "my_info_key"
>>> env = GymWrapper(gym.make("some_env-v0"))
>>> env.set_info_dict_reader(info_dict_reader=reader)
>>> tensordict = env.reset()
>>> tensordict = env.rand_step(tensordict)
>>> assert "my_info_key" in tensordict.keys()
```
The problem is that we also register the expected observations (domain, dtype, device, etc.) in an `observation_spec` attribute. It is important for us to know what to expect as output from the env. However, the info keys are not yet registered in `observation_spec`. This is a problem for parallel environments, where we want to pre-allocate the tensors we place in shared memory to pass information from one process to another: for that, we use `observation_spec`, which is cheaper than resetting the env.
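Concretely, continuing the example above, the gap looks roughly like this (illustrative only):
```python
>>> "my_info_key" in tensordict.keys()
True
>>> "my_info_key" in env.observation_spec.keys()  # the spec does not know about it
False
```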
## Solution
We should be able to provide the specs corresponding to the info keys in the `default_info_dict_reader`, and the `set_info_dict_reader` should be able to read them. If no spec is provided, a default unidimensional, floating-point unbounded spec should be assumed.
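A rough sketch of the kind of interface this suggests (the `spec` argument name and the use of `UnboundedContinuousTensorSpec` as the default are assumptions of this proposal):
```python
>>> from torchrl.data.tensor_specs import UnboundedContinuousTensorSpec
>>> reader = default_info_dict_reader(
...     ["my_info_key"],
...     spec={"my_info_key": UnboundedContinuousTensorSpec()},  # omitted keys -> default spec
... )
>>> env.set_info_dict_reader(info_dict_reader=reader)
>>> assert "my_info_key" in env.observation_spec.keys()
```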
</issue>
<code>
[start of torchrl/envs/gym_like.py]
1 # Copyright (c) Meta Platforms, Inc. and affiliates.
2 #
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 from __future__ import annotations
7
8 import warnings
9 from typing import Optional, Union, Tuple, Any, Dict
10
11 import numpy as np
12 import torch
13
14 from torchrl.data import TensorDict
15 from torchrl.data.tensordict.tensordict import TensorDictBase
16 from torchrl.envs.common import _EnvWrapper
17
18 __all__ = ["GymLikeEnv", "default_info_dict_reader"]
19
20
21 class default_info_dict_reader:
22 """
23 Default info-key reader.
24
25 In cases where keys can be directly written to a tensordict (mostly if they abide to the
26 tensordict shape), one simply needs to indicate the keys to be registered during
27 instantiation.
28
29 Examples:
30 >>> from torchrl.envs.libs.gym import GymWrapper
31 >>> from torchrl.envs import default_info_dict_reader
32 >>> reader = default_info_dict_reader(["my_info_key"])
33 >>> # assuming "some_env-v0" returns a dict with a key "my_info_key"
34 >>> env = GymWrapper(gym.make("some_env-v0"))
35 >>> env.set_info_dict_reader(info_dict_reader=reader)
36 >>> tensordict = env.reset()
37 >>> tensordict = env.rand_step(tensordict)
38 >>> assert "my_info_key" in tensordict.keys()
39
40 """
41
42 def __init__(self, keys=None):
43 if keys is None:
44 keys = []
45 self.keys = keys
46
47 def __call__(
48 self, info_dict: Dict[str, Any], tensordict: TensorDictBase
49 ) -> TensorDictBase:
50 if not isinstance(info_dict, dict) and len(self.keys):
51 warnings.warn(
52 f"Found an info_dict of type {type(info_dict)} "
53 f"but expected type or subtype `dict`."
54 )
55 for key in self.keys:
56 if key in info_dict:
57 tensordict[key] = info_dict[key]
58 return tensordict
59
60
61 class GymLikeEnv(_EnvWrapper):
62 _info_dict_reader: callable
63
64 """
65 A gym-like env is an environment whose behaviour is similar to gym environments in what
66 common methods (specifically reset and step) are expected to do.
67
68
69 A `GymLikeEnv` has a `.step()` method with the following signature:
70
71 ``env.step(action: np.ndarray) -> Tuple[Union[np.ndarray, dict], double, bool, *info]``
72
73 where the outputs are the observation, reward and done state respectively.
74 In this implementation, the info output is discarded (but specific keys can be read
75 by updating info_dict_reader, see `set_info_dict_reader` class method).
76
77 By default, the first output is written at the "next_observation" key-value pair in the output tensordict, unless
78 the first output is a dictionary. In that case, each observation output will be put at the corresponding
79 "next_observation_{key}" location.
80
81 It is also expected that env.reset() returns an observation similar to the one observed after a step is completed.
82 """
83
84 @classmethod
85 def __new__(cls, *args, **kwargs):
86 cls._info_dict_reader = None
87 return super().__new__(cls, *args, _batch_locked=True, **kwargs)
88
89 def read_action(self, action):
90 """Reads the action obtained from the input TensorDict and transforms it
91 in the format expected by the contained environment.
92
93 Args:
94 action (Tensor or TensorDict): an action to be taken in the environment
95
96 Returns: an action in a format compatible with the contained environment.
97
98 """
99 return self.action_spec.to_numpy(action, safe=False)
100
101 def read_done(self, done):
102 """Reads a done state and returns a tuple containing:
103 - a done state to be set in the environment
104 - a boolean value indicating whether the frame_skip loop should be broken
105
106 Args:
107 done (np.ndarray, boolean or other format): done state obtained from the environment
108
109 """
110 return done, done
111
112 def read_reward(self, total_reward, step_reward):
113 """Reads a reward and the total reward so far (in the frame skip loop)
114 and returns a sum of the two.
115
116 Args:
117 total_reward (torch.Tensor or TensorDict): total reward so far in the step
118 step_reward (reward in the format provided by the inner env): reward of this particular step
119
120 """
121 return total_reward + self.reward_spec.encode(step_reward)
122
123 def read_obs(
124 self, observations: Union[Dict[str, Any], torch.Tensor, np.ndarray]
125 ) -> Dict[str, Any]:
126 """Reads an observation from the environment and returns an observation
127 compatible with the output TensorDict.
128
129 Args:
130 observations (observation under a format dictated by the inner env): observation to be read.
131
132 """
133 if isinstance(observations, dict):
134 observations = {"next_" + key: value for key, value in observations.items()}
135 if not isinstance(observations, (TensorDict, dict)):
136 key = list(self.observation_spec.keys())[0]
137 observations = {key: observations}
138 observations = self.observation_spec.encode(observations)
139 return observations
140
141 def _step(self, tensordict: TensorDictBase) -> TensorDictBase:
142 action = tensordict.get("action")
143 action_np = self.read_action(action)
144
145 reward = self.reward_spec.zero(self.batch_size)
146 for _ in range(self.wrapper_frame_skip):
147 obs, _reward, done, *info = self._output_transform(
148 self._env.step(action_np)
149 )
150 if isinstance(obs, list) and len(obs) == 1:
151 # Until gym 0.25.2 we had rendered frames returned in lists of length 1
152 obs = obs[0]
153 if len(info) == 2:
154 # gym 0.26
155 truncation, info = info
156 elif len(info) == 1:
157 info = info[0]
158 elif len(info) == 0:
159 info = None
160 else:
161 raise ValueError(
162 "the environment output is expected to be either"
163 "obs, reward, done, truncation, info (gym >= 0.26) or "
164 f"obs, reward, done, info. Got info with types = ({[type(x) for x in info]})"
165 )
166
167 if _reward is None:
168 _reward = self.reward_spec.zero(self.batch_size)
169
170 reward = self.read_reward(reward, _reward)
171
172 # TODO: check how to deal with np arrays
173 done, do_break = self.read_done(done)
174 if do_break:
175 break
176
177 obs_dict = self.read_obs(obs)
178
179 if reward is None:
180 reward = np.nan
181 reward = self._to_tensor(reward, dtype=self.reward_spec.dtype)
182 done = self._to_tensor(done, dtype=torch.bool)
183 self.is_done = done
184
185 tensordict_out = TensorDict(
186 obs_dict, batch_size=tensordict.batch_size, device=self.device
187 )
188 tensordict_out.set("reward", reward)
189 tensordict_out.set("done", done)
190 if self.info_dict_reader is not None and info is not None:
191 self.info_dict_reader(info, tensordict_out)
192
193 return tensordict_out
194
195 def _reset(
196 self, tensordict: Optional[TensorDictBase] = None, **kwargs
197 ) -> TensorDictBase:
198 reset_data = self._env.reset(**kwargs)
199 if not isinstance(reset_data, tuple):
200 reset_data = (reset_data,)
201 obs, *_ = self._output_transform(reset_data)
202 tensordict_out = TensorDict(
203 source=self.read_obs(obs),
204 batch_size=self.batch_size,
205 device=self.device,
206 )
207 self._is_done = torch.zeros(self.batch_size, dtype=torch.bool)
208 tensordict_out.set("done", self._is_done)
209 return tensordict_out
210
211 def _output_transform(self, step_outputs_tuple: Tuple) -> Tuple:
212 """To be overwritten when step_outputs differ from Tuple[Observation: Union[np.ndarray, dict], reward: Number, done:Bool]"""
213 if not isinstance(step_outputs_tuple, tuple):
214 raise TypeError(
215 f"Expected step_outputs_tuple type to be Tuple but got {type(step_outputs_tuple)}"
216 )
217 return step_outputs_tuple
218
219 def set_info_dict_reader(self, info_dict_reader: callable) -> GymLikeEnv:
220 """
221 Sets an info_dict_reader function. This function should take as input an
222 info_dict dictionary and the tensordict returned by the step function, and
223 write values in an ad-hoc manner from one to the other.
224
225 Args:
226 info_dict_reader (callable): a callable taking a input dictionary and
227 output tensordict as arguments. This function should modify the
228 tensordict in-place.
229
230 Returns: the same environment with the dict_reader registered.
231
232 Examples:
233 >>> from torchrl.envs import GymWrapper, default_info_dict_reader
234 >>> reader = default_info_dict_reader(["my_info_key"])
235 >>> # assuming "some_env-v0" returns a dict with a key "my_info_key"
236 >>> env = GymWrapper(gym.make("some_env-v0")).set_info_dict_reader(info_dict_reader=reader)
237 >>> tensordict = env.reset()
238 >>> tensordict = env.rand_step(tensordict)
239 >>> assert "my_info_key" in tensordict.keys()
240
241 """
242 self.info_dict_reader = info_dict_reader
243 return self
244
245 def __repr__(self) -> str:
246 return (
247 f"{self.__class__.__name__}(env={self._env}, batch_size={self.batch_size})"
248 )
249
250 @property
251 def info_dict_reader(self):
252 return self._info_dict_reader
253
254 @info_dict_reader.setter
255 def info_dict_reader(self, value: callable):
256 self._info_dict_reader = value
257
[end of torchrl/envs/gym_like.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/torchrl/envs/gym_like.py b/torchrl/envs/gym_like.py
--- a/torchrl/envs/gym_like.py
+++ b/torchrl/envs/gym_like.py
@@ -5,20 +5,38 @@
from __future__ import annotations
+import abc
import warnings
-from typing import Optional, Union, Tuple, Any, Dict
+from typing import List, Optional, Sequence, Union, Tuple, Any, Dict
import numpy as np
import torch
from torchrl.data import TensorDict
+from torchrl.data.tensor_specs import TensorSpec, UnboundedContinuousTensorSpec
from torchrl.data.tensordict.tensordict import TensorDictBase
from torchrl.envs.common import _EnvWrapper
__all__ = ["GymLikeEnv", "default_info_dict_reader"]
-class default_info_dict_reader:
+class BaseInfoDictReader(metaclass=abc.ABCMeta):
+ """
+ Base class for info-readers.
+ """
+
+ @abc.abstractmethod
+ def __call__(
+ self, info_dict: Dict[str, Any], tensordict: TensorDictBase
+ ) -> TensorDictBase:
+ raise NotImplementedError
+
+ @abc.abstractproperty
+ def info_spec(self) -> Dict[str, TensorSpec]:
+ raise NotImplementedError
+
+
+class default_info_dict_reader(BaseInfoDictReader):
"""
Default info-key reader.
@@ -39,11 +57,30 @@
"""
- def __init__(self, keys=None):
+ def __init__(
+ self,
+ keys: List[str] = None,
+ spec: Union[Sequence[TensorSpec], Dict[str, TensorSpec]] = None,
+ ):
if keys is None:
keys = []
self.keys = keys
+ if isinstance(spec, Sequence):
+ if len(spec) != len(self.keys):
+ raise ValueError(
+ "If specifying specs for info keys with a sequence, the "
+ "length of the sequence must match the number of keys"
+ )
+ self._info_spec = dict(zip(self.keys, spec))
+ else:
+ if spec is None:
+ spec = {}
+
+ self._info_spec = {
+ key: spec.get(key, UnboundedContinuousTensorSpec()) for key in self.keys
+ }
+
def __call__(
self, info_dict: Dict[str, Any], tensordict: TensorDictBase
) -> TensorDictBase:
@@ -57,9 +94,13 @@
tensordict[key] = info_dict[key]
return tensordict
+ @property
+ def info_spec(self) -> Dict[str, TensorSpec]:
+ return self._info_spec
+
class GymLikeEnv(_EnvWrapper):
- _info_dict_reader: callable
+ _info_dict_reader: BaseInfoDictReader
"""
A gym-like env is an environment whose behaviour is similar to gym environments in what
@@ -216,7 +257,7 @@
)
return step_outputs_tuple
- def set_info_dict_reader(self, info_dict_reader: callable) -> GymLikeEnv:
+ def set_info_dict_reader(self, info_dict_reader: BaseInfoDictReader) -> GymLikeEnv:
"""
Sets an info_dict_reader function. This function should take as input an
info_dict dictionary and the tensordict returned by the step function, and
@@ -240,6 +281,8 @@
"""
self.info_dict_reader = info_dict_reader
+ for info_key, spec in info_dict_reader.info_spec.items():
+ self.observation_spec[info_key] = spec
return self
def __repr__(self) -> str:
| {"golden_diff": "diff --git a/torchrl/envs/gym_like.py b/torchrl/envs/gym_like.py\n--- a/torchrl/envs/gym_like.py\n+++ b/torchrl/envs/gym_like.py\n@@ -5,20 +5,38 @@\n \n from __future__ import annotations\n \n+import abc\n import warnings\n-from typing import Optional, Union, Tuple, Any, Dict\n+from typing import List, Optional, Sequence, Union, Tuple, Any, Dict\n \n import numpy as np\n import torch\n \n from torchrl.data import TensorDict\n+from torchrl.data.tensor_specs import TensorSpec, UnboundedContinuousTensorSpec\n from torchrl.data.tensordict.tensordict import TensorDictBase\n from torchrl.envs.common import _EnvWrapper\n \n __all__ = [\"GymLikeEnv\", \"default_info_dict_reader\"]\n \n \n-class default_info_dict_reader:\n+class BaseInfoDictReader(metaclass=abc.ABCMeta):\n+ \"\"\"\n+ Base class for info-readers.\n+ \"\"\"\n+\n+ @abc.abstractmethod\n+ def __call__(\n+ self, info_dict: Dict[str, Any], tensordict: TensorDictBase\n+ ) -> TensorDictBase:\n+ raise NotImplementedError\n+\n+ @abc.abstractproperty\n+ def info_spec(self) -> Dict[str, TensorSpec]:\n+ raise NotImplementedError\n+\n+\n+class default_info_dict_reader(BaseInfoDictReader):\n \"\"\"\n Default info-key reader.\n \n@@ -39,11 +57,30 @@\n \n \"\"\"\n \n- def __init__(self, keys=None):\n+ def __init__(\n+ self,\n+ keys: List[str] = None,\n+ spec: Union[Sequence[TensorSpec], Dict[str, TensorSpec]] = None,\n+ ):\n if keys is None:\n keys = []\n self.keys = keys\n \n+ if isinstance(spec, Sequence):\n+ if len(spec) != len(self.keys):\n+ raise ValueError(\n+ \"If specifying specs for info keys with a sequence, the \"\n+ \"length of the sequence must match the number of keys\"\n+ )\n+ self._info_spec = dict(zip(self.keys, spec))\n+ else:\n+ if spec is None:\n+ spec = {}\n+\n+ self._info_spec = {\n+ key: spec.get(key, UnboundedContinuousTensorSpec()) for key in self.keys\n+ }\n+\n def __call__(\n self, info_dict: Dict[str, Any], tensordict: TensorDictBase\n ) -> TensorDictBase:\n@@ -57,9 +94,13 @@\n tensordict[key] = info_dict[key]\n return tensordict\n \n+ @property\n+ def info_spec(self) -> Dict[str, TensorSpec]:\n+ return self._info_spec\n+\n \n class GymLikeEnv(_EnvWrapper):\n- _info_dict_reader: callable\n+ _info_dict_reader: BaseInfoDictReader\n \n \"\"\"\n A gym-like env is an environment whose behaviour is similar to gym environments in what\n@@ -216,7 +257,7 @@\n )\n return step_outputs_tuple\n \n- def set_info_dict_reader(self, info_dict_reader: callable) -> GymLikeEnv:\n+ def set_info_dict_reader(self, info_dict_reader: BaseInfoDictReader) -> GymLikeEnv:\n \"\"\"\n Sets an info_dict_reader function. This function should take as input an\n info_dict dictionary and the tensordict returned by the step function, and\n@@ -240,6 +281,8 @@\n \n \"\"\"\n self.info_dict_reader = info_dict_reader\n+ for info_key, spec in info_dict_reader.info_spec.items():\n+ self.observation_spec[info_key] = spec\n return self\n \n def __repr__(self) -> str:\n", "issue": "[Feature Request] Add info dict key-spec pairs to `env.observation_spec` if they're provided to a gym environment through `set_info_dict_reader`\n## Motivation\r\n\r\ngym's environments can pass some more info through a dictionary `info`:\r\n`obs, reward, done, terminated = env.step(action)`.\r\nThis dictionary can contain additional information about the state of the env: time spent doing the task, whether the task is solved etc.\r\nWe wrap the obs, reward and done in the output `TensorDict`. 
We can also place the info if they're numerical values (see #234):\r\n```python\r\n >>> from torchrl.envs.libs.gym import GymWrapper\r\n >>> from torchrl.envs import default_info_dict_reader\r\n >>> reader = default_info_dict_reader([\"my_info_key\"])\r\n >>> # assuming \"some_env-v0\" returns a dict with a key \"my_info_key\"\r\n >>> env = GymWrapper(gym.make(\"some_env-v0\"))\r\n >>> env.set_info_dict_reader(info_dict_reader=reader)\r\n >>> tensordict = env.reset()\r\n >>> tensordict = env.rand_step(tensordict)\r\n >>> assert \"my_info_key\" in tensordict.keys()\r\n```\r\n\r\nProblem is that we also register what are the expected observations (domain, dtype, device etc) in an `observation_spec` attribute. It is important for us to know what to expect as output from the env. However the info is not yet registered in `observation_spec`. This is a problem for parallel environments as we would wish to pre-allocate the tensors that we place in share memory to pass information from one process to another: for that, we use the `observation_spec` which is cheaper than resetting the env.\r\n\r\n## Solution\r\n\r\nWe should be able to provide the specs corresponding to the info keys in the `default_info_dict_reader`, and the `set_info_dict_reader` should be able to read them. If no spec is provided, a default unidimensional, floating-point unbounded spec should be assumed.\n", "before_files": [{"content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom __future__ import annotations\n\nimport warnings\nfrom typing import Optional, Union, Tuple, Any, Dict\n\nimport numpy as np\nimport torch\n\nfrom torchrl.data import TensorDict\nfrom torchrl.data.tensordict.tensordict import TensorDictBase\nfrom torchrl.envs.common import _EnvWrapper\n\n__all__ = [\"GymLikeEnv\", \"default_info_dict_reader\"]\n\n\nclass default_info_dict_reader:\n \"\"\"\n Default info-key reader.\n\n In cases where keys can be directly written to a tensordict (mostly if they abide to the\n tensordict shape), one simply needs to indicate the keys to be registered during\n instantiation.\n\n Examples:\n >>> from torchrl.envs.libs.gym import GymWrapper\n >>> from torchrl.envs import default_info_dict_reader\n >>> reader = default_info_dict_reader([\"my_info_key\"])\n >>> # assuming \"some_env-v0\" returns a dict with a key \"my_info_key\"\n >>> env = GymWrapper(gym.make(\"some_env-v0\"))\n >>> env.set_info_dict_reader(info_dict_reader=reader)\n >>> tensordict = env.reset()\n >>> tensordict = env.rand_step(tensordict)\n >>> assert \"my_info_key\" in tensordict.keys()\n\n \"\"\"\n\n def __init__(self, keys=None):\n if keys is None:\n keys = []\n self.keys = keys\n\n def __call__(\n self, info_dict: Dict[str, Any], tensordict: TensorDictBase\n ) -> TensorDictBase:\n if not isinstance(info_dict, dict) and len(self.keys):\n warnings.warn(\n f\"Found an info_dict of type {type(info_dict)} \"\n f\"but expected type or subtype `dict`.\"\n )\n for key in self.keys:\n if key in info_dict:\n tensordict[key] = info_dict[key]\n return tensordict\n\n\nclass GymLikeEnv(_EnvWrapper):\n _info_dict_reader: callable\n\n \"\"\"\n A gym-like env is an environment whose behaviour is similar to gym environments in what\n common methods (specifically reset and step) are expected to do.\n\n\n A `GymLikeEnv` has a `.step()` method with the following signature:\n\n ``env.step(action: np.ndarray) -> 
Tuple[Union[np.ndarray, dict], double, bool, *info]``\n\n where the outputs are the observation, reward and done state respectively.\n In this implementation, the info output is discarded (but specific keys can be read\n by updating info_dict_reader, see `set_info_dict_reader` class method).\n\n By default, the first output is written at the \"next_observation\" key-value pair in the output tensordict, unless\n the first output is a dictionary. In that case, each observation output will be put at the corresponding\n \"next_observation_{key}\" location.\n\n It is also expected that env.reset() returns an observation similar to the one observed after a step is completed.\n \"\"\"\n\n @classmethod\n def __new__(cls, *args, **kwargs):\n cls._info_dict_reader = None\n return super().__new__(cls, *args, _batch_locked=True, **kwargs)\n\n def read_action(self, action):\n \"\"\"Reads the action obtained from the input TensorDict and transforms it\n in the format expected by the contained environment.\n\n Args:\n action (Tensor or TensorDict): an action to be taken in the environment\n\n Returns: an action in a format compatible with the contained environment.\n\n \"\"\"\n return self.action_spec.to_numpy(action, safe=False)\n\n def read_done(self, done):\n \"\"\"Reads a done state and returns a tuple containing:\n - a done state to be set in the environment\n - a boolean value indicating whether the frame_skip loop should be broken\n\n Args:\n done (np.ndarray, boolean or other format): done state obtained from the environment\n\n \"\"\"\n return done, done\n\n def read_reward(self, total_reward, step_reward):\n \"\"\"Reads a reward and the total reward so far (in the frame skip loop)\n and returns a sum of the two.\n\n Args:\n total_reward (torch.Tensor or TensorDict): total reward so far in the step\n step_reward (reward in the format provided by the inner env): reward of this particular step\n\n \"\"\"\n return total_reward + self.reward_spec.encode(step_reward)\n\n def read_obs(\n self, observations: Union[Dict[str, Any], torch.Tensor, np.ndarray]\n ) -> Dict[str, Any]:\n \"\"\"Reads an observation from the environment and returns an observation\n compatible with the output TensorDict.\n\n Args:\n observations (observation under a format dictated by the inner env): observation to be read.\n\n \"\"\"\n if isinstance(observations, dict):\n observations = {\"next_\" + key: value for key, value in observations.items()}\n if not isinstance(observations, (TensorDict, dict)):\n key = list(self.observation_spec.keys())[0]\n observations = {key: observations}\n observations = self.observation_spec.encode(observations)\n return observations\n\n def _step(self, tensordict: TensorDictBase) -> TensorDictBase:\n action = tensordict.get(\"action\")\n action_np = self.read_action(action)\n\n reward = self.reward_spec.zero(self.batch_size)\n for _ in range(self.wrapper_frame_skip):\n obs, _reward, done, *info = self._output_transform(\n self._env.step(action_np)\n )\n if isinstance(obs, list) and len(obs) == 1:\n # Until gym 0.25.2 we had rendered frames returned in lists of length 1\n obs = obs[0]\n if len(info) == 2:\n # gym 0.26\n truncation, info = info\n elif len(info) == 1:\n info = info[0]\n elif len(info) == 0:\n info = None\n else:\n raise ValueError(\n \"the environment output is expected to be either\"\n \"obs, reward, done, truncation, info (gym >= 0.26) or \"\n f\"obs, reward, done, info. 
Got info with types = ({[type(x) for x in info]})\"\n )\n\n if _reward is None:\n _reward = self.reward_spec.zero(self.batch_size)\n\n reward = self.read_reward(reward, _reward)\n\n # TODO: check how to deal with np arrays\n done, do_break = self.read_done(done)\n if do_break:\n break\n\n obs_dict = self.read_obs(obs)\n\n if reward is None:\n reward = np.nan\n reward = self._to_tensor(reward, dtype=self.reward_spec.dtype)\n done = self._to_tensor(done, dtype=torch.bool)\n self.is_done = done\n\n tensordict_out = TensorDict(\n obs_dict, batch_size=tensordict.batch_size, device=self.device\n )\n tensordict_out.set(\"reward\", reward)\n tensordict_out.set(\"done\", done)\n if self.info_dict_reader is not None and info is not None:\n self.info_dict_reader(info, tensordict_out)\n\n return tensordict_out\n\n def _reset(\n self, tensordict: Optional[TensorDictBase] = None, **kwargs\n ) -> TensorDictBase:\n reset_data = self._env.reset(**kwargs)\n if not isinstance(reset_data, tuple):\n reset_data = (reset_data,)\n obs, *_ = self._output_transform(reset_data)\n tensordict_out = TensorDict(\n source=self.read_obs(obs),\n batch_size=self.batch_size,\n device=self.device,\n )\n self._is_done = torch.zeros(self.batch_size, dtype=torch.bool)\n tensordict_out.set(\"done\", self._is_done)\n return tensordict_out\n\n def _output_transform(self, step_outputs_tuple: Tuple) -> Tuple:\n \"\"\"To be overwritten when step_outputs differ from Tuple[Observation: Union[np.ndarray, dict], reward: Number, done:Bool]\"\"\"\n if not isinstance(step_outputs_tuple, tuple):\n raise TypeError(\n f\"Expected step_outputs_tuple type to be Tuple but got {type(step_outputs_tuple)}\"\n )\n return step_outputs_tuple\n\n def set_info_dict_reader(self, info_dict_reader: callable) -> GymLikeEnv:\n \"\"\"\n Sets an info_dict_reader function. This function should take as input an\n info_dict dictionary and the tensordict returned by the step function, and\n write values in an ad-hoc manner from one to the other.\n\n Args:\n info_dict_reader (callable): a callable taking a input dictionary and\n output tensordict as arguments. This function should modify the\n tensordict in-place.\n\n Returns: the same environment with the dict_reader registered.\n\n Examples:\n >>> from torchrl.envs import GymWrapper, default_info_dict_reader\n >>> reader = default_info_dict_reader([\"my_info_key\"])\n >>> # assuming \"some_env-v0\" returns a dict with a key \"my_info_key\"\n >>> env = GymWrapper(gym.make(\"some_env-v0\")).set_info_dict_reader(info_dict_reader=reader)\n >>> tensordict = env.reset()\n >>> tensordict = env.rand_step(tensordict)\n >>> assert \"my_info_key\" in tensordict.keys()\n\n \"\"\"\n self.info_dict_reader = info_dict_reader\n return self\n\n def __repr__(self) -> str:\n return (\n f\"{self.__class__.__name__}(env={self._env}, batch_size={self.batch_size})\"\n )\n\n @property\n def info_dict_reader(self):\n return self._info_dict_reader\n\n @info_dict_reader.setter\n def info_dict_reader(self, value: callable):\n self._info_dict_reader = value\n", "path": "torchrl/envs/gym_like.py"}]} | 3,826 | 831 |
gh_patches_debug_31329 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-1174 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
don't rely on master branch for latest version number
## Feature
### Feature description
The `master` branch of dbt isn't really a thing anymore. Instead of relying on the [master](https://github.com/fishtown-analytics/dbt/blob/51f68e3aabcda57afbe5051983d1d17e092be665/dbt/version.py#L12) branch to grab the latest release number, we should pull it from PyPI.
We can use [this API](https://warehouse.readthedocs.io/api-reference/json/) to fetch [some JSON info](https://pypi.org/pypi/dbt/json) about dbt releases.
We need to confirm that pre-releases are not shown as the latest version for a package on PyPI.
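For reference, a minimal sketch of what reading the latest release from the PyPI JSON endpoint could look like (using `requests` here is an assumption, not a statement about the final implementation):

```python
import requests

PYPI_URL = "https://pypi.org/pypi/dbt/json"

def fetch_latest_version_string():
    resp = requests.get(PYPI_URL, timeout=5)
    resp.raise_for_status()
    # "info" -> "version" holds the latest release; we still need to confirm
    # how pre-releases are treated before trusting this value.
    return resp.json()["info"]["version"]
```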
### Who will this benefit?
dbt maintainers :)
</issue>
<code>
[start of dbt/version.py]
1 import re
2
3 import dbt.semver
4
5 try:
6 # For Python 3.0 and later
7 from urllib.request import urlopen
8 except ImportError:
9 # Fall back to Python 2's urllib2
10 from urllib2 import urlopen
11
12 REMOTE_VERSION_FILE = \
13 'https://raw.githubusercontent.com/fishtown-analytics/dbt/' \
14 'master/.bumpversion.cfg'
15
16
17 def get_version_string_from_text(contents):
18 matches = re.search(r"current_version = ([\.0-9a-z]+)", contents)
19 if matches is None or len(matches.groups()) != 1:
20 return ""
21 version = matches.groups()[0]
22 return version
23
24
25 def get_remote_version_file_contents(url=REMOTE_VERSION_FILE):
26 try:
27 f = urlopen(url)
28 contents = f.read()
29 except Exception:
30 contents = ''
31 if hasattr(contents, 'decode'):
32 contents = contents.decode('utf-8')
33 return contents
34
35
36 def get_latest_version():
37 contents = get_remote_version_file_contents()
38 if contents == '':
39 return None
40 version_string = get_version_string_from_text(contents)
41 return dbt.semver.VersionSpecifier.from_version_string(version_string)
42
43
44 def get_installed_version():
45 return dbt.semver.VersionSpecifier.from_version_string(__version__)
46
47
48 def get_version_information():
49 installed = get_installed_version()
50 latest = get_latest_version()
51
52 installed_s = installed.to_version_string(skip_matcher=True)
53 if latest is None:
54 latest_s = 'unknown'
55 else:
56 latest_s = latest.to_version_string(skip_matcher=True)
57
58 version_msg = ("installed version: {}\n"
59 " latest version: {}\n\n".format(installed_s, latest_s))
60
61 if latest is None:
62 return ("{}The latest version of dbt could not be determined!\n"
63 "Make sure that the following URL is accessible:\n{}"
64 .format(version_msg, REMOTE_VERSION_FILE))
65
66 if installed == latest:
67 return "{}Up to date!".format(version_msg)
68
69 elif installed > latest:
70 return ("{}Your version of dbt is ahead of the latest "
71 "release!".format(version_msg))
72
73 else:
74 return ("{}Your version of dbt is out of date! "
75 "You can find instructions for upgrading here:\n"
76 "https://docs.getdbt.com/docs/installation"
77 .format(version_msg))
78
79
80 __version__ = '0.12.1'
81 installed = get_installed_version()
82
[end of dbt/version.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dbt/version.py b/dbt/version.py
--- a/dbt/version.py
+++ b/dbt/version.py
@@ -1,43 +1,23 @@
+import json
import re
-import dbt.semver
-
-try:
- # For Python 3.0 and later
- from urllib.request import urlopen
-except ImportError:
- # Fall back to Python 2's urllib2
- from urllib2 import urlopen
-
-REMOTE_VERSION_FILE = \
- 'https://raw.githubusercontent.com/fishtown-analytics/dbt/' \
- 'master/.bumpversion.cfg'
-
+import requests
-def get_version_string_from_text(contents):
- matches = re.search(r"current_version = ([\.0-9a-z]+)", contents)
- if matches is None or len(matches.groups()) != 1:
- return ""
- version = matches.groups()[0]
- return version
+import dbt.exceptions
+import dbt.semver
-def get_remote_version_file_contents(url=REMOTE_VERSION_FILE):
- try:
- f = urlopen(url)
- contents = f.read()
- except Exception:
- contents = ''
- if hasattr(contents, 'decode'):
- contents = contents.decode('utf-8')
- return contents
+PYPI_VERSION_URL = 'https://pypi.org/pypi/dbt/json'
def get_latest_version():
- contents = get_remote_version_file_contents()
- if contents == '':
+ try:
+ resp = requests.get(PYPI_VERSION_URL)
+ data = resp.json()
+ version_string = data['info']['version']
+ except (json.JSONDecodeError, KeyError, requests.RequestException):
return None
- version_string = get_version_string_from_text(contents)
+
return dbt.semver.VersionSpecifier.from_version_string(version_string)
@@ -61,7 +41,7 @@
if latest is None:
return ("{}The latest version of dbt could not be determined!\n"
"Make sure that the following URL is accessible:\n{}"
- .format(version_msg, REMOTE_VERSION_FILE))
+ .format(version_msg, PYPI_VERSION_URL))
if installed == latest:
return "{}Up to date!".format(version_msg)
| {"golden_diff": "diff --git a/dbt/version.py b/dbt/version.py\n--- a/dbt/version.py\n+++ b/dbt/version.py\n@@ -1,43 +1,23 @@\n+import json\n import re\n \n-import dbt.semver\n-\n-try:\n- # For Python 3.0 and later\n- from urllib.request import urlopen\n-except ImportError:\n- # Fall back to Python 2's urllib2\n- from urllib2 import urlopen\n-\n-REMOTE_VERSION_FILE = \\\n- 'https://raw.githubusercontent.com/fishtown-analytics/dbt/' \\\n- 'master/.bumpversion.cfg'\n-\n+import requests\n \n-def get_version_string_from_text(contents):\n- matches = re.search(r\"current_version = ([\\.0-9a-z]+)\", contents)\n- if matches is None or len(matches.groups()) != 1:\n- return \"\"\n- version = matches.groups()[0]\n- return version\n+import dbt.exceptions\n+import dbt.semver\n \n \n-def get_remote_version_file_contents(url=REMOTE_VERSION_FILE):\n- try:\n- f = urlopen(url)\n- contents = f.read()\n- except Exception:\n- contents = ''\n- if hasattr(contents, 'decode'):\n- contents = contents.decode('utf-8')\n- return contents\n+PYPI_VERSION_URL = 'https://pypi.org/pypi/dbt/json'\n \n \n def get_latest_version():\n- contents = get_remote_version_file_contents()\n- if contents == '':\n+ try:\n+ resp = requests.get(PYPI_VERSION_URL)\n+ data = resp.json()\n+ version_string = data['info']['version']\n+ except (json.JSONDecodeError, KeyError, requests.RequestException):\n return None\n- version_string = get_version_string_from_text(contents)\n+\n return dbt.semver.VersionSpecifier.from_version_string(version_string)\n \n \n@@ -61,7 +41,7 @@\n if latest is None:\n return (\"{}The latest version of dbt could not be determined!\\n\"\n \"Make sure that the following URL is accessible:\\n{}\"\n- .format(version_msg, REMOTE_VERSION_FILE))\n+ .format(version_msg, PYPI_VERSION_URL))\n \n if installed == latest:\n return \"{}Up to date!\".format(version_msg)\n", "issue": "don't rely on master branch for latest version number\n## Feature\r\n\r\n### Feature description\r\nThe `master` branch of dbt isn't really a thing anymore. 
Instead of relying on the [master](https://github.com/fishtown-analytics/dbt/blob/51f68e3aabcda57afbe5051983d1d17e092be665/dbt/version.py#L12) branch to grab the latest release number, we should pull it from PyPi.\r\n\r\nWe can use [this api](https://warehouse.readthedocs.io/api-reference/json/) to fetch [some JSON info](https://pypi.org/pypi/dbt/json) about dbt releases.\r\n\r\nWe need to confirm that pre-releases are not shown as the latest version for a package on PyPi.\r\n\r\n### Who will this benefit?\r\ndbt maintainers :) \n", "before_files": [{"content": "import re\n\nimport dbt.semver\n\ntry:\n # For Python 3.0 and later\n from urllib.request import urlopen\nexcept ImportError:\n # Fall back to Python 2's urllib2\n from urllib2 import urlopen\n\nREMOTE_VERSION_FILE = \\\n 'https://raw.githubusercontent.com/fishtown-analytics/dbt/' \\\n 'master/.bumpversion.cfg'\n\n\ndef get_version_string_from_text(contents):\n matches = re.search(r\"current_version = ([\\.0-9a-z]+)\", contents)\n if matches is None or len(matches.groups()) != 1:\n return \"\"\n version = matches.groups()[0]\n return version\n\n\ndef get_remote_version_file_contents(url=REMOTE_VERSION_FILE):\n try:\n f = urlopen(url)\n contents = f.read()\n except Exception:\n contents = ''\n if hasattr(contents, 'decode'):\n contents = contents.decode('utf-8')\n return contents\n\n\ndef get_latest_version():\n contents = get_remote_version_file_contents()\n if contents == '':\n return None\n version_string = get_version_string_from_text(contents)\n return dbt.semver.VersionSpecifier.from_version_string(version_string)\n\n\ndef get_installed_version():\n return dbt.semver.VersionSpecifier.from_version_string(__version__)\n\n\ndef get_version_information():\n installed = get_installed_version()\n latest = get_latest_version()\n\n installed_s = installed.to_version_string(skip_matcher=True)\n if latest is None:\n latest_s = 'unknown'\n else:\n latest_s = latest.to_version_string(skip_matcher=True)\n\n version_msg = (\"installed version: {}\\n\"\n \" latest version: {}\\n\\n\".format(installed_s, latest_s))\n\n if latest is None:\n return (\"{}The latest version of dbt could not be determined!\\n\"\n \"Make sure that the following URL is accessible:\\n{}\"\n .format(version_msg, REMOTE_VERSION_FILE))\n\n if installed == latest:\n return \"{}Up to date!\".format(version_msg)\n\n elif installed > latest:\n return (\"{}Your version of dbt is ahead of the latest \"\n \"release!\".format(version_msg))\n\n else:\n return (\"{}Your version of dbt is out of date! \"\n \"You can find instructions for upgrading here:\\n\"\n \"https://docs.getdbt.com/docs/installation\"\n .format(version_msg))\n\n\n__version__ = '0.12.1'\ninstalled = get_installed_version()\n", "path": "dbt/version.py"}]} | 1,415 | 498 |
gh_patches_debug_25128 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-3035 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Sum/Histogram/Gauge.data_points annotated as Sequence but actual type is generator
This applies to all of the MetricsData data types `Sum`, `Histogram` and `Gauge`. The type annotation is `Sequence`:
https://github.com/open-telemetry/opentelemetry-python/blob/778c4b1cffdd448c531c868a9470618eb8a6517c/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py#L68
But the implementation assigns an `Iterable` (specifically a generator which is not a valid `Sequence`):
https://github.com/open-telemetry/opentelemetry-python/blob/778c4b1cffdd448c531c868a9470618eb8a6517c/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py#L160
**What is the expected behavior?**
Type annotated as `Sequence` should work with `len()`, subscription, [etc.](https://docs.python.org/3/library/stdtypes.html#common-sequence-operations). A more subtle bug the generator causes is that `data_points` can only be iterated over once, which will consume the generator.
**What is the actual behavior?**
```
TypeError: object of type 'generator' has no len()
```
**Additional context**
Possible fixes
I'd recommend adding calls to `list()` or `tuple()` to convert the generator to a sequence or alter `_ViewInstrumentMatch.collect()` to return a sequence itself. I think this is appropriate as the generators are actually [holding SDK locks](https://github.com/open-telemetry/opentelemetry-python/blob/778c4b1cffdd448c531c868a9470618eb8a6517c/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py#L137) which can remain locked if the generator is not consumed.
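For illustration, here is a minimal sketch of the list-based variant of that suggestion; the method and attribute names are taken from the `_ViewInstrumentMatch` code further down, and this is a sketch rather than a verified patch:

```python
# Sketch only: materialize the data points while the lock is held,
# then return a real list, which satisfies the Sequence annotation.
def collect(self, aggregation_temporality, collection_start_nanos):
    data_points = []
    with self._lock:
        for aggregation in self._attributes_aggregation.values():
            data_point = aggregation.collect(
                aggregation_temporality, collection_start_nanos
            )
            if data_point is not None:
                data_points.append(data_point)
    # The with-block has already released the lock by this point.
    return data_points  # len(), indexing and repeated iteration now work
```

Because the list is fully built before `collect()` returns, an unconsumed result can no longer keep the SDK lock held.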
</issue>
<code>
[start of opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py]
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15
16 from logging import getLogger
17 from threading import Lock
18 from time import time_ns
19 from typing import Dict, Iterable
20
21 from opentelemetry.metrics import Instrument
22 from opentelemetry.sdk.metrics._internal.aggregation import (
23 Aggregation,
24 DefaultAggregation,
25 _Aggregation,
26 _SumAggregation,
27 )
28 from opentelemetry.sdk.metrics._internal.export import AggregationTemporality
29 from opentelemetry.sdk.metrics._internal.measurement import Measurement
30 from opentelemetry.sdk.metrics._internal.point import DataPointT
31 from opentelemetry.sdk.metrics._internal.view import View
32
33 _logger = getLogger(__name__)
34
35
36 class _ViewInstrumentMatch:
37 def __init__(
38 self,
39 view: View,
40 instrument: Instrument,
41 instrument_class_aggregation: Dict[type, Aggregation],
42 ):
43 self._start_time_unix_nano = time_ns()
44 self._view = view
45 self._instrument = instrument
46 self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}
47 self._lock = Lock()
48 self._instrument_class_aggregation = instrument_class_aggregation
49 self._name = self._view._name or self._instrument.name
50 self._description = (
51 self._view._description or self._instrument.description
52 )
53 if not isinstance(self._view._aggregation, DefaultAggregation):
54 self._aggregation = self._view._aggregation._create_aggregation(
55 self._instrument, None, 0
56 )
57 else:
58 self._aggregation = self._instrument_class_aggregation[
59 self._instrument.__class__
60 ]._create_aggregation(self._instrument, None, 0)
61
62 def conflicts(self, other: "_ViewInstrumentMatch") -> bool:
63 # pylint: disable=protected-access
64
65 result = (
66 self._name == other._name
67 and self._instrument.unit == other._instrument.unit
68 # The aggregation class is being used here instead of data point
69 # type since they are functionally equivalent.
70 and self._aggregation.__class__ == other._aggregation.__class__
71 )
72 if isinstance(self._aggregation, _SumAggregation):
73 result = (
74 result
75 and self._aggregation._instrument_is_monotonic
76 == other._aggregation._instrument_is_monotonic
77 and self._aggregation._instrument_temporality
78 == other._aggregation._instrument_temporality
79 )
80
81 return result
82
83 # pylint: disable=protected-access
84 def consume_measurement(self, measurement: Measurement) -> None:
85
86 if self._view._attribute_keys is not None:
87
88 attributes = {}
89
90 for key, value in (measurement.attributes or {}).items():
91 if key in self._view._attribute_keys:
92 attributes[key] = value
93 elif measurement.attributes is not None:
94 attributes = measurement.attributes
95 else:
96 attributes = {}
97
98 aggr_key = frozenset(attributes.items())
99
100 if aggr_key not in self._attributes_aggregation:
101 with self._lock:
102 if aggr_key not in self._attributes_aggregation:
103 if not isinstance(
104 self._view._aggregation, DefaultAggregation
105 ):
106 aggregation = (
107 self._view._aggregation._create_aggregation(
108 self._instrument,
109 attributes,
110 self._start_time_unix_nano,
111 )
112 )
113 else:
114 aggregation = self._instrument_class_aggregation[
115 self._instrument.__class__
116 ]._create_aggregation(
117 self._instrument,
118 attributes,
119 self._start_time_unix_nano,
120 )
121 self._attributes_aggregation[aggr_key] = aggregation
122
123 self._attributes_aggregation[aggr_key].aggregate(measurement)
124
125 def collect(
126 self,
127 aggregation_temporality: AggregationTemporality,
128 collection_start_nanos: int,
129 ) -> Iterable[DataPointT]:
130
131 with self._lock:
132 for aggregation in self._attributes_aggregation.values():
133 data_point = aggregation.collect(
134 aggregation_temporality, collection_start_nanos
135 )
136 if data_point is not None:
137 yield data_point
138
[end of opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py
@@ -16,7 +16,7 @@
from logging import getLogger
from threading import Lock
from time import time_ns
-from typing import Dict, Iterable
+from typing import Dict, List, Sequence
from opentelemetry.metrics import Instrument
from opentelemetry.sdk.metrics._internal.aggregation import (
@@ -126,12 +126,14 @@
self,
aggregation_temporality: AggregationTemporality,
collection_start_nanos: int,
- ) -> Iterable[DataPointT]:
+ ) -> Sequence[DataPointT]:
+ data_points: List[DataPointT] = []
with self._lock:
for aggregation in self._attributes_aggregation.values():
data_point = aggregation.collect(
aggregation_temporality, collection_start_nanos
)
if data_point is not None:
- yield data_point
+ data_points.append(data_point)
+ return data_points
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py\n@@ -16,7 +16,7 @@\n from logging import getLogger\n from threading import Lock\n from time import time_ns\n-from typing import Dict, Iterable\n+from typing import Dict, List, Sequence\n \n from opentelemetry.metrics import Instrument\n from opentelemetry.sdk.metrics._internal.aggregation import (\n@@ -126,12 +126,14 @@\n self,\n aggregation_temporality: AggregationTemporality,\n collection_start_nanos: int,\n- ) -> Iterable[DataPointT]:\n+ ) -> Sequence[DataPointT]:\n \n+ data_points: List[DataPointT] = []\n with self._lock:\n for aggregation in self._attributes_aggregation.values():\n data_point = aggregation.collect(\n aggregation_temporality, collection_start_nanos\n )\n if data_point is not None:\n- yield data_point\n+ data_points.append(data_point)\n+ return data_points\n", "issue": "Sum/Histogram/Gauge.data_points annotated as Sequence but actual type is generator\nThis applies to all of the MetricsData data types `Sum`, `Histogram` and `Gauge`. The type annotation is `Sequence`:\r\nhttps://github.com/open-telemetry/opentelemetry-python/blob/778c4b1cffdd448c531c868a9470618eb8a6517c/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py#L68\r\n\r\nBut the implementation assigns an `Iterable` (specifically a generator which is not a valid `Sequence`):\r\nhttps://github.com/open-telemetry/opentelemetry-python/blob/778c4b1cffdd448c531c868a9470618eb8a6517c/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py#L160\r\n\r\n**What is the expected behavior?**\r\nType annotated as `Sequence` should work with `len()`, subscription, [etc.](https://docs.python.org/3/library/stdtypes.html#common-sequence-operations). A more subtle bug the generator causes is that `data_points` can only be iterated over once, which will consume the generator.\r\n\r\n**What is the actual behavior?**\r\n```\r\nTypeError: object of type 'generator' has no len()\r\n```\r\n\r\n**Additional context**\r\nPossible fixes\r\n\r\nI'd recommend adding calls to `list()` or `tuple()` to convert the generator to a sequence or alter `_ViewInstrumentMatch.collect()` to return a sequence itself. I think this is appropriate as the generators are actually [holding SDK locks](https://github.com/open-telemetry/opentelemetry-python/blob/778c4b1cffdd448c531c868a9470618eb8a6517c/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py#L137) which can remain locked if the generator is not consumed.\nSum/Histogram/Gauge.data_points annotated as Sequence but actual type is generator\nThis applies to all of the MetricsData data types `Sum`, `Histogram` and `Gauge`. 
The type annotation is `Sequence`:\r\nhttps://github.com/open-telemetry/opentelemetry-python/blob/778c4b1cffdd448c531c868a9470618eb8a6517c/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/point.py#L68\r\n\r\nBut the implementation assigns an `Iterable` (specifically a generator which is not a valid `Sequence`):\r\nhttps://github.com/open-telemetry/opentelemetry-python/blob/778c4b1cffdd448c531c868a9470618eb8a6517c/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/metric_reader_storage.py#L160\r\n\r\n**What is the expected behavior?**\r\nType annotated as `Sequence` should work with `len()`, subscription, [etc.](https://docs.python.org/3/library/stdtypes.html#common-sequence-operations). A more subtle bug the generator causes is that `data_points` can only be iterated over once, which will consume the generator.\r\n\r\n**What is the actual behavior?**\r\n```\r\nTypeError: object of type 'generator' has no len()\r\n```\r\n\r\n**Additional context**\r\nPossible fixes\r\n\r\nI'd recommend adding calls to `list()` or `tuple()` to convert the generator to a sequence or alter `_ViewInstrumentMatch.collect()` to return a sequence itself. I think this is appropriate as the generators are actually [holding SDK locks](https://github.com/open-telemetry/opentelemetry-python/blob/778c4b1cffdd448c531c868a9470618eb8a6517c/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py#L137) which can remain locked if the generator is not consumed.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom logging import getLogger\nfrom threading import Lock\nfrom time import time_ns\nfrom typing import Dict, Iterable\n\nfrom opentelemetry.metrics import Instrument\nfrom opentelemetry.sdk.metrics._internal.aggregation import (\n Aggregation,\n DefaultAggregation,\n _Aggregation,\n _SumAggregation,\n)\nfrom opentelemetry.sdk.metrics._internal.export import AggregationTemporality\nfrom opentelemetry.sdk.metrics._internal.measurement import Measurement\nfrom opentelemetry.sdk.metrics._internal.point import DataPointT\nfrom opentelemetry.sdk.metrics._internal.view import View\n\n_logger = getLogger(__name__)\n\n\nclass _ViewInstrumentMatch:\n def __init__(\n self,\n view: View,\n instrument: Instrument,\n instrument_class_aggregation: Dict[type, Aggregation],\n ):\n self._start_time_unix_nano = time_ns()\n self._view = view\n self._instrument = instrument\n self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}\n self._lock = Lock()\n self._instrument_class_aggregation = instrument_class_aggregation\n self._name = self._view._name or self._instrument.name\n self._description = (\n self._view._description or self._instrument.description\n )\n if not isinstance(self._view._aggregation, DefaultAggregation):\n self._aggregation = self._view._aggregation._create_aggregation(\n self._instrument, None, 0\n )\n else:\n self._aggregation = self._instrument_class_aggregation[\n 
self._instrument.__class__\n ]._create_aggregation(self._instrument, None, 0)\n\n def conflicts(self, other: \"_ViewInstrumentMatch\") -> bool:\n # pylint: disable=protected-access\n\n result = (\n self._name == other._name\n and self._instrument.unit == other._instrument.unit\n # The aggregation class is being used here instead of data point\n # type since they are functionally equivalent.\n and self._aggregation.__class__ == other._aggregation.__class__\n )\n if isinstance(self._aggregation, _SumAggregation):\n result = (\n result\n and self._aggregation._instrument_is_monotonic\n == other._aggregation._instrument_is_monotonic\n and self._aggregation._instrument_temporality\n == other._aggregation._instrument_temporality\n )\n\n return result\n\n # pylint: disable=protected-access\n def consume_measurement(self, measurement: Measurement) -> None:\n\n if self._view._attribute_keys is not None:\n\n attributes = {}\n\n for key, value in (measurement.attributes or {}).items():\n if key in self._view._attribute_keys:\n attributes[key] = value\n elif measurement.attributes is not None:\n attributes = measurement.attributes\n else:\n attributes = {}\n\n aggr_key = frozenset(attributes.items())\n\n if aggr_key not in self._attributes_aggregation:\n with self._lock:\n if aggr_key not in self._attributes_aggregation:\n if not isinstance(\n self._view._aggregation, DefaultAggregation\n ):\n aggregation = (\n self._view._aggregation._create_aggregation(\n self._instrument,\n attributes,\n self._start_time_unix_nano,\n )\n )\n else:\n aggregation = self._instrument_class_aggregation[\n self._instrument.__class__\n ]._create_aggregation(\n self._instrument,\n attributes,\n self._start_time_unix_nano,\n )\n self._attributes_aggregation[aggr_key] = aggregation\n\n self._attributes_aggregation[aggr_key].aggregate(measurement)\n\n def collect(\n self,\n aggregation_temporality: AggregationTemporality,\n collection_start_nanos: int,\n ) -> Iterable[DataPointT]:\n\n with self._lock:\n for aggregation in self._attributes_aggregation.values():\n data_point = aggregation.collect(\n aggregation_temporality, collection_start_nanos\n )\n if data_point is not None:\n yield data_point\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/_view_instrument_match.py"}]} | 2,774 | 291 |
gh_patches_debug_21829 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-2062 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Christina to fixtures
### Issue description
Christina (@witha-c) will need a sandbox soon and should be added to fixtures so she can access admin on her own sandbox once fixtures are loaded to it.
### Acceptance criteria
- [ ] add Christina to fixtures as analyst and superuser
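For reference, a sketch of the shape such an entry takes in the `ADMINS` list of the fixtures file below (the UUID here is a placeholder rather than a real account id, and a matching "-Analyst" entry would go in `STAFF`):

```python
# Placeholder values for illustration only -- not a real username UUID.
{
    "username": "00000000-0000-0000-0000-000000000000",
    "first_name": "Christina",
    "last_name": "Burnett",
    "email": "[email protected]",
},
```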
### Additional context
_No response_
### Links to other issues
_No response_
</issue>
<code>
[start of src/registrar/fixtures_users.py]
1 import logging
2 from faker import Faker
3 from django.db import transaction
4
5 from registrar.models import (
6 User,
7 UserGroup,
8 )
9
10 fake = Faker()
11 logger = logging.getLogger(__name__)
12
13
14 class UserFixture:
15 """
16 Load users into the database.
17
18 Make sure this class' `load` method is called from `handle`
19 in management/commands/load.py, then use `./manage.py load`
20 to run this code.
21 """
22
23 ADMINS = [
24 {
25 "username": "5f283494-31bd-49b5-b024-a7e7cae00848",
26 "first_name": "Rachid",
27 "last_name": "Mrad",
28 },
29 {
30 "username": "eb2214cd-fc0c-48c0-9dbd-bc4cd6820c74",
31 "first_name": "Alysia",
32 "last_name": "Broddrick",
33 },
34 {
35 "username": "8f8e7293-17f7-4716-889b-1990241cbd39",
36 "first_name": "Katherine",
37 "last_name": "Osos",
38 },
39 {
40 "username": "70488e0a-e937-4894-a28c-16f5949effd4",
41 "first_name": "Gaby",
42 "last_name": "DiSarli",
43 "email": "[email protected]",
44 },
45 {
46 "username": "83c2b6dd-20a2-4cac-bb40-e22a72d2955c",
47 "first_name": "Cameron",
48 "last_name": "Dixon",
49 },
50 {
51 "username": "0353607a-cbba-47d2-98d7-e83dcd5b90ea",
52 "first_name": "Ryan",
53 "last_name": "Brooks",
54 },
55 {
56 "username": "30001ee7-0467-4df2-8db2-786e79606060",
57 "first_name": "Zander",
58 "last_name": "Adkinson",
59 },
60 {
61 "username": "2bf518c2-485a-4c42-ab1a-f5a8b0a08484",
62 "first_name": "Paul",
63 "last_name": "Kuykendall",
64 },
65 {
66 "username": "2a88a97b-be96-4aad-b99e-0b605b492c78",
67 "first_name": "Rebecca",
68 "last_name": "Hsieh",
69 },
70 {
71 "username": "fa69c8e8-da83-4798-a4f2-263c9ce93f52",
72 "first_name": "David",
73 "last_name": "Kennedy",
74 },
75 {
76 "username": "f14433d8-f0e9-41bf-9c72-b99b110e665d",
77 "first_name": "Nicolle",
78 "last_name": "LeClair",
79 },
80 {
81 "username": "24840450-bf47-4d89-8aa9-c612fe68f9da",
82 "first_name": "Erin",
83 "last_name": "Song",
84 },
85 {
86 "username": "e0ea8b94-6e53-4430-814a-849a7ca45f21",
87 "first_name": "Kristina",
88 "last_name": "Yin",
89 },
90 {
91 "username": "ac49d7c1-368a-4e6b-8f1d-60250e20a16f",
92 "first_name": "Vicky",
93 "last_name": "Chin",
94 "email": "[email protected]",
95 },
96 {
97 "username": "012f844d-8a0f-4225-9d82-cbf87bff1d3e",
98 "first_name": "Riley",
99 "last_name": "Orr",
100 "email": "[email protected]",
101 },
102 ]
103
104 STAFF = [
105 {
106 "username": "319c490d-453b-43d9-bc4d-7d6cd8ff6844",
107 "first_name": "Rachid-Analyst",
108 "last_name": "Mrad-Analyst",
109 "email": "[email protected]",
110 },
111 {
112 "username": "b6a15987-5c88-4e26-8de2-ca71a0bdb2cd",
113 "first_name": "Alysia-Analyst",
114 "last_name": "Alysia-Analyst",
115 },
116 {
117 "username": "91a9b97c-bd0a-458d-9823-babfde7ebf44",
118 "first_name": "Katherine-Analyst",
119 "last_name": "Osos-Analyst",
120 "email": "[email protected]",
121 },
122 {
123 "username": "2cc0cde8-8313-4a50-99d8-5882e71443e8",
124 "first_name": "Zander-Analyst",
125 "last_name": "Adkinson-Analyst",
126 },
127 {
128 "username": "57ab5847-7789-49fe-a2f9-21d38076d699",
129 "first_name": "Paul-Analyst",
130 "last_name": "Kuykendall-Analyst",
131 },
132 {
133 "username": "e474e7a9-71ca-449d-833c-8a6e094dd117",
134 "first_name": "Rebecca-Analyst",
135 "last_name": "Hsieh-Analyst",
136 },
137 {
138 "username": "5dc6c9a6-61d9-42b4-ba54-4beff28bac3c",
139 "first_name": "David-Analyst",
140 "last_name": "Kennedy-Analyst",
141 },
142 {
143 "username": "0eb6f326-a3d4-410f-a521-aa4c1fad4e47",
144 "first_name": "Gaby-Analyst",
145 "last_name": "DiSarli-Analyst",
146 "email": "[email protected]",
147 },
148 {
149 "username": "cfe7c2fc-e24a-480e-8b78-28645a1459b3",
150 "first_name": "Nicolle-Analyst",
151 "last_name": "LeClair-Analyst",
152 "email": "[email protected]",
153 },
154 {
155 "username": "378d0bc4-d5a7-461b-bd84-3ae6f6864af9",
156 "first_name": "Erin-Analyst",
157 "last_name": "Song-Analyst",
158 "email": "[email protected]",
159 },
160 {
161 "username": "9a98e4c9-9409-479d-964e-4aec7799107f",
162 "first_name": "Kristina-Analyst",
163 "last_name": "Yin-Analyst",
164 "email": "[email protected]",
165 },
166 {
167 "username": "8f42302e-b83a-4c9e-8764-fc19e2cea576",
168 "first_name": "Vickster-Analyst",
169 "last_name": "Chin-Analyst",
170 "email": "[email protected]",
171 },
172 {
173 "username": "d9839768-0c17-4fa2-9c8e-36291eef5c11",
174 "first_name": "Alex-Analyst",
175 "last_name": "Mcelya-Analyst",
176 "email": "[email protected]",
177 },
178 {
179 "username": "082a066f-e0a4-45f6-8672-4343a1208a36",
180 "first_name": "Riley-Analyst",
181 "last_name": "Orr-Analyst",
182 "email": "[email protected]",
183 },
184 ]
185
186 def load_users(cls, users, group_name):
187 logger.info(f"Going to load {len(users)} users in group {group_name}")
188 for user_data in users:
189 try:
190 user, _ = User.objects.get_or_create(username=user_data["username"])
191 user.is_superuser = False
192 user.first_name = user_data["first_name"]
193 user.last_name = user_data["last_name"]
194 if "email" in user_data:
195 user.email = user_data["email"]
196 user.is_staff = True
197 user.is_active = True
198 group = UserGroup.objects.get(name=group_name)
199 user.groups.add(group)
200 user.save()
201 logger.debug(f"User object created for {user_data['first_name']}")
202 except Exception as e:
203 logger.warning(e)
204 logger.info(f"All users in group {group_name} loaded.")
205
206 @classmethod
207 def load(cls):
208 # Lumped under .atomic to ensure we don't make redundant DB calls.
209 # This bundles them all together, and then saves it in a single call.
210 # This is slightly different then bulk_create or bulk_update, in that
211 # you still get the same behaviour of .save(), but those incremental
212 # steps now do not need to close/reopen a db connection,
213 # instead they share one.
214 with transaction.atomic():
215 cls.load_users(cls, cls.ADMINS, "full_access_group")
216 cls.load_users(cls, cls.STAFF, "cisa_analysts_group")
217
[end of src/registrar/fixtures_users.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/registrar/fixtures_users.py b/src/registrar/fixtures_users.py
--- a/src/registrar/fixtures_users.py
+++ b/src/registrar/fixtures_users.py
@@ -93,6 +93,12 @@
"last_name": "Chin",
"email": "[email protected]",
},
+ {
+ "username": "66bb1a5a-a091-4d7f-a6cf-4d772b4711c7",
+ "first_name": "Christina",
+ "last_name": "Burnett",
+ "email": "[email protected]",
+ },
{
"username": "012f844d-8a0f-4225-9d82-cbf87bff1d3e",
"first_name": "Riley",
@@ -169,6 +175,12 @@
"last_name": "Chin-Analyst",
"email": "[email protected]",
},
+ {
+ "username": "22f88aa5-3b54-4b1f-9c57-201fb02ddba7",
+ "first_name": "Christina-Analyst",
+ "last_name": "Burnett-Analyst",
+ "email": "[email protected]",
+ },
{
"username": "d9839768-0c17-4fa2-9c8e-36291eef5c11",
"first_name": "Alex-Analyst",
| {"golden_diff": "diff --git a/src/registrar/fixtures_users.py b/src/registrar/fixtures_users.py\n--- a/src/registrar/fixtures_users.py\n+++ b/src/registrar/fixtures_users.py\n@@ -93,6 +93,12 @@\n \"last_name\": \"Chin\",\n \"email\": \"[email protected]\",\n },\n+ {\n+ \"username\": \"66bb1a5a-a091-4d7f-a6cf-4d772b4711c7\",\n+ \"first_name\": \"Christina\",\n+ \"last_name\": \"Burnett\",\n+ \"email\": \"[email protected]\",\n+ },\n {\n \"username\": \"012f844d-8a0f-4225-9d82-cbf87bff1d3e\",\n \"first_name\": \"Riley\",\n@@ -169,6 +175,12 @@\n \"last_name\": \"Chin-Analyst\",\n \"email\": \"[email protected]\",\n },\n+ {\n+ \"username\": \"22f88aa5-3b54-4b1f-9c57-201fb02ddba7\",\n+ \"first_name\": \"Christina-Analyst\",\n+ \"last_name\": \"Burnett-Analyst\",\n+ \"email\": \"[email protected]\",\n+ },\n {\n \"username\": \"d9839768-0c17-4fa2-9c8e-36291eef5c11\",\n \"first_name\": \"Alex-Analyst\",\n", "issue": "Add Christina to fixtures\n### Issue description\n\nChristina (@witha-c ) will need a sandbox soon and should be added to fixtures so she can access admin on her own sandbox once fixtures are loaded to it\n\n### Acceptance criteria\n\n- [ ] add Christina to fixtures as analyst and superuser\n\n### Additional context\n\n_No response_\n\n### Links to other issues\n\n_No response_\n", "before_files": [{"content": "import logging\nfrom faker import Faker\nfrom django.db import transaction\n\nfrom registrar.models import (\n User,\n UserGroup,\n)\n\nfake = Faker()\nlogger = logging.getLogger(__name__)\n\n\nclass UserFixture:\n \"\"\"\n Load users into the database.\n\n Make sure this class' `load` method is called from `handle`\n in management/commands/load.py, then use `./manage.py load`\n to run this code.\n \"\"\"\n\n ADMINS = [\n {\n \"username\": \"5f283494-31bd-49b5-b024-a7e7cae00848\",\n \"first_name\": \"Rachid\",\n \"last_name\": \"Mrad\",\n },\n {\n \"username\": \"eb2214cd-fc0c-48c0-9dbd-bc4cd6820c74\",\n \"first_name\": \"Alysia\",\n \"last_name\": \"Broddrick\",\n },\n {\n \"username\": \"8f8e7293-17f7-4716-889b-1990241cbd39\",\n \"first_name\": \"Katherine\",\n \"last_name\": \"Osos\",\n },\n {\n \"username\": \"70488e0a-e937-4894-a28c-16f5949effd4\",\n \"first_name\": \"Gaby\",\n \"last_name\": \"DiSarli\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"83c2b6dd-20a2-4cac-bb40-e22a72d2955c\",\n \"first_name\": \"Cameron\",\n \"last_name\": \"Dixon\",\n },\n {\n \"username\": \"0353607a-cbba-47d2-98d7-e83dcd5b90ea\",\n \"first_name\": \"Ryan\",\n \"last_name\": \"Brooks\",\n },\n {\n \"username\": \"30001ee7-0467-4df2-8db2-786e79606060\",\n \"first_name\": \"Zander\",\n \"last_name\": \"Adkinson\",\n },\n {\n \"username\": \"2bf518c2-485a-4c42-ab1a-f5a8b0a08484\",\n \"first_name\": \"Paul\",\n \"last_name\": \"Kuykendall\",\n },\n {\n \"username\": \"2a88a97b-be96-4aad-b99e-0b605b492c78\",\n \"first_name\": \"Rebecca\",\n \"last_name\": \"Hsieh\",\n },\n {\n \"username\": \"fa69c8e8-da83-4798-a4f2-263c9ce93f52\",\n \"first_name\": \"David\",\n \"last_name\": \"Kennedy\",\n },\n {\n \"username\": \"f14433d8-f0e9-41bf-9c72-b99b110e665d\",\n \"first_name\": \"Nicolle\",\n \"last_name\": \"LeClair\",\n },\n {\n \"username\": \"24840450-bf47-4d89-8aa9-c612fe68f9da\",\n \"first_name\": \"Erin\",\n \"last_name\": \"Song\",\n },\n {\n \"username\": \"e0ea8b94-6e53-4430-814a-849a7ca45f21\",\n \"first_name\": \"Kristina\",\n \"last_name\": \"Yin\",\n },\n {\n \"username\": \"ac49d7c1-368a-4e6b-8f1d-60250e20a16f\",\n \"first_name\": \"Vicky\",\n \"last_name\": 
\"Chin\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"012f844d-8a0f-4225-9d82-cbf87bff1d3e\",\n \"first_name\": \"Riley\",\n \"last_name\": \"Orr\",\n \"email\": \"[email protected]\",\n },\n ]\n\n STAFF = [\n {\n \"username\": \"319c490d-453b-43d9-bc4d-7d6cd8ff6844\",\n \"first_name\": \"Rachid-Analyst\",\n \"last_name\": \"Mrad-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"b6a15987-5c88-4e26-8de2-ca71a0bdb2cd\",\n \"first_name\": \"Alysia-Analyst\",\n \"last_name\": \"Alysia-Analyst\",\n },\n {\n \"username\": \"91a9b97c-bd0a-458d-9823-babfde7ebf44\",\n \"first_name\": \"Katherine-Analyst\",\n \"last_name\": \"Osos-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"2cc0cde8-8313-4a50-99d8-5882e71443e8\",\n \"first_name\": \"Zander-Analyst\",\n \"last_name\": \"Adkinson-Analyst\",\n },\n {\n \"username\": \"57ab5847-7789-49fe-a2f9-21d38076d699\",\n \"first_name\": \"Paul-Analyst\",\n \"last_name\": \"Kuykendall-Analyst\",\n },\n {\n \"username\": \"e474e7a9-71ca-449d-833c-8a6e094dd117\",\n \"first_name\": \"Rebecca-Analyst\",\n \"last_name\": \"Hsieh-Analyst\",\n },\n {\n \"username\": \"5dc6c9a6-61d9-42b4-ba54-4beff28bac3c\",\n \"first_name\": \"David-Analyst\",\n \"last_name\": \"Kennedy-Analyst\",\n },\n {\n \"username\": \"0eb6f326-a3d4-410f-a521-aa4c1fad4e47\",\n \"first_name\": \"Gaby-Analyst\",\n \"last_name\": \"DiSarli-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"cfe7c2fc-e24a-480e-8b78-28645a1459b3\",\n \"first_name\": \"Nicolle-Analyst\",\n \"last_name\": \"LeClair-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"378d0bc4-d5a7-461b-bd84-3ae6f6864af9\",\n \"first_name\": \"Erin-Analyst\",\n \"last_name\": \"Song-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"9a98e4c9-9409-479d-964e-4aec7799107f\",\n \"first_name\": \"Kristina-Analyst\",\n \"last_name\": \"Yin-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"8f42302e-b83a-4c9e-8764-fc19e2cea576\",\n \"first_name\": \"Vickster-Analyst\",\n \"last_name\": \"Chin-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"d9839768-0c17-4fa2-9c8e-36291eef5c11\",\n \"first_name\": \"Alex-Analyst\",\n \"last_name\": \"Mcelya-Analyst\",\n \"email\": \"[email protected]\",\n },\n {\n \"username\": \"082a066f-e0a4-45f6-8672-4343a1208a36\",\n \"first_name\": \"Riley-Analyst\",\n \"last_name\": \"Orr-Analyst\",\n \"email\": \"[email protected]\",\n },\n ]\n\n def load_users(cls, users, group_name):\n logger.info(f\"Going to load {len(users)} users in group {group_name}\")\n for user_data in users:\n try:\n user, _ = User.objects.get_or_create(username=user_data[\"username\"])\n user.is_superuser = False\n user.first_name = user_data[\"first_name\"]\n user.last_name = user_data[\"last_name\"]\n if \"email\" in user_data:\n user.email = user_data[\"email\"]\n user.is_staff = True\n user.is_active = True\n group = UserGroup.objects.get(name=group_name)\n user.groups.add(group)\n user.save()\n logger.debug(f\"User object created for {user_data['first_name']}\")\n except Exception as e:\n logger.warning(e)\n logger.info(f\"All users in group {group_name} loaded.\")\n\n @classmethod\n def load(cls):\n # Lumped under .atomic to ensure we don't make redundant DB calls.\n # This bundles them all together, and then saves it in a single call.\n # This is slightly different then bulk_create or bulk_update, in that\n # you still get the same behaviour of .save(), but 
those incremental\n # steps now do not need to close/reopen a db connection,\n # instead they share one.\n with transaction.atomic():\n cls.load_users(cls, cls.ADMINS, \"full_access_group\")\n cls.load_users(cls, cls.STAFF, \"cisa_analysts_group\")\n", "path": "src/registrar/fixtures_users.py"}]} | 3,621 | 396 |
gh_patches_debug_16102 | rasdani/github-patches | git_diff | modin-project__modin-3670 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Improve OmniSci "quick start" usability
Right now, to activate OmniSci you have to set both `Backend` to `Omnisci` and `Engine` to `native`.
I think we should try to set `Engine` to `native` if it is unset by the user while `Backend` is set to `Omnisci`.
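A rough sketch of that defaulting behaviour, written against the `_update_engine` hook in the code below (where the backend option is exposed as `StorageFormat`); the `get_value_source()` / `ValueSource.DEFAULT` check for "left at its default" is an assumption about the config API, so treat this as an illustration rather than the final change:

```python
# Sketch only: if the user explicitly chose the OmniSci storage format but left
# the engine at its default, switch the engine to "Native" instead of raising.
from modin.config.pubsub import ValueSource  # assumed import path


def _update_engine(publisher):
    from modin.config import StorageFormat

    if (
        StorageFormat.get() == "Omnisci"
        and publisher.get_value_source() == ValueSource.DEFAULT
    ):
        publisher.put("Native")
    # ... then continue with the existing per-engine initialization ...
```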
</issue>
<code>
[start of modin/pandas/__init__.py]
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 import pandas
15
16 __pandas_version__ = "1.3.4"
17
18 if pandas.__version__ != __pandas_version__:
19 import warnings
20
21 warnings.warn(
22 "The pandas version installed {} does not match the supported pandas version in"
23 " Modin {}. This may cause undesired side effects!".format(
24 pandas.__version__, __pandas_version__
25 )
26 )
27
28 from pandas import (
29 eval,
30 cut,
31 factorize,
32 test,
33 qcut,
34 date_range,
35 period_range,
36 Index,
37 MultiIndex,
38 CategoricalIndex,
39 bdate_range,
40 DatetimeIndex,
41 Timedelta,
42 Timestamp,
43 to_timedelta,
44 set_eng_float_format,
45 options,
46 Flags,
47 set_option,
48 NaT,
49 PeriodIndex,
50 Categorical,
51 Interval,
52 UInt8Dtype,
53 UInt16Dtype,
54 UInt32Dtype,
55 UInt64Dtype,
56 SparseDtype,
57 Int8Dtype,
58 Int16Dtype,
59 Int32Dtype,
60 Int64Dtype,
61 Float32Dtype,
62 Float64Dtype,
63 StringDtype,
64 BooleanDtype,
65 CategoricalDtype,
66 DatetimeTZDtype,
67 IntervalDtype,
68 PeriodDtype,
69 RangeIndex,
70 Int64Index,
71 UInt64Index,
72 Float64Index,
73 TimedeltaIndex,
74 IntervalIndex,
75 IndexSlice,
76 Grouper,
77 array,
78 Period,
79 show_versions,
80 DateOffset,
81 timedelta_range,
82 infer_freq,
83 interval_range,
84 ExcelWriter,
85 datetime,
86 NamedAgg,
87 NA,
88 api,
89 )
90 import os
91 import multiprocessing
92
93 from modin.config import Engine, Parameter
94
95 # Set this so that Pandas doesn't try to multithread by itself
96 os.environ["OMP_NUM_THREADS"] = "1"
97
98 _is_first_update = {}
99 dask_client = None
100 _NOINIT_ENGINES = {
101 "Python",
102 } # engines that don't require initialization, useful for unit tests
103
104
105 def _update_engine(publisher: Parameter):
106 global dask_client
107 from modin.config import StorageFormat, CpuCount
108
109 if publisher.get() == "Ray":
110 if _is_first_update.get("Ray", True):
111 from modin.core.execution.ray.common.utils import initialize_ray
112
113 initialize_ray()
114 elif publisher.get() == "Native":
115 # With OmniSci storage format there is only a single worker per node
116 # and we allow it to work on all cores.
117 if StorageFormat.get() == "Omnisci":
118 os.environ["OMP_NUM_THREADS"] = str(CpuCount.get())
119 else:
120 raise ValueError(
121 f"Storage format should be 'Omnisci' with 'Native' engine, but provided {StorageFormat.get()}."
122 )
123 elif publisher.get() == "Dask":
124 if _is_first_update.get("Dask", True):
125 from modin.core.execution.dask.common.utils import initialize_dask
126
127 initialize_dask()
128 elif publisher.get() == "Cloudray":
129 from modin.experimental.cloud import get_connection
130
131 conn = get_connection()
132 if _is_first_update.get("Cloudray", True):
133
134 @conn.teleport
135 def init_remote_ray(partition):
136 from ray import ray_constants
137 import modin
138 from modin.core.execution.ray.common.utils import initialize_ray
139
140 modin.set_execution("Ray", partition)
141 initialize_ray(
142 override_is_cluster=True,
143 override_redis_address=f"localhost:{ray_constants.DEFAULT_PORT}",
144 override_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
145 )
146
147 init_remote_ray(StorageFormat.get())
148 # import FactoryDispatcher here to initialize IO class
149 # so it doesn't skew read_csv() timings later on
150 import modin.core.execution.dispatching.factories.dispatcher # noqa: F401
151 else:
152 get_connection().modules["modin"].set_execution("Ray", StorageFormat.get())
153 elif publisher.get() == "Cloudpython":
154 from modin.experimental.cloud import get_connection
155
156 get_connection().modules["modin"].set_execution("Python")
157 elif publisher.get() == "Cloudnative":
158 from modin.experimental.cloud import get_connection
159
160 assert (
161 StorageFormat.get() == "Omnisci"
162 ), f"Storage format should be 'Omnisci' with 'Cloudnative' engine, but provided {StorageFormat.get()}."
163 get_connection().modules["modin"].set_execution("Native", "OmniSci")
164
165 elif publisher.get() not in _NOINIT_ENGINES:
166 raise ImportError("Unrecognized execution engine: {}.".format(publisher.get()))
167
168 _is_first_update[publisher.get()] = False
169
170
171 from .. import __version__
172 from .dataframe import DataFrame
173 from .io import (
174 read_csv,
175 read_parquet,
176 read_json,
177 read_html,
178 read_clipboard,
179 read_excel,
180 read_hdf,
181 read_feather,
182 read_stata,
183 read_sas,
184 read_pickle,
185 read_sql,
186 read_gbq,
187 read_table,
188 read_fwf,
189 read_sql_table,
190 read_sql_query,
191 read_spss,
192 ExcelFile,
193 to_pickle,
194 HDFStore,
195 json_normalize,
196 read_orc,
197 read_xml,
198 )
199 from .series import Series
200 from .general import (
201 concat,
202 isna,
203 isnull,
204 merge,
205 merge_asof,
206 merge_ordered,
207 pivot_table,
208 notnull,
209 notna,
210 pivot,
211 to_numeric,
212 to_datetime,
213 unique,
214 value_counts,
215 get_dummies,
216 melt,
217 crosstab,
218 lreshape,
219 wide_to_long,
220 )
221 from .plotting import Plotting as plotting
222
223 __all__ = [
224 "DataFrame",
225 "Series",
226 "read_csv",
227 "read_parquet",
228 "read_json",
229 "read_html",
230 "read_clipboard",
231 "read_excel",
232 "read_hdf",
233 "read_feather",
234 "read_stata",
235 "read_sas",
236 "read_pickle",
237 "read_sql",
238 "read_gbq",
239 "read_table",
240 "read_spss",
241 "read_orc",
242 "json_normalize",
243 "concat",
244 "eval",
245 "cut",
246 "factorize",
247 "test",
248 "qcut",
249 "to_datetime",
250 "get_dummies",
251 "isna",
252 "isnull",
253 "merge",
254 "pivot_table",
255 "date_range",
256 "Index",
257 "MultiIndex",
258 "Series",
259 "bdate_range",
260 "period_range",
261 "DatetimeIndex",
262 "to_timedelta",
263 "set_eng_float_format",
264 "options",
265 "set_option",
266 "CategoricalIndex",
267 "Timedelta",
268 "Timestamp",
269 "NaT",
270 "PeriodIndex",
271 "Categorical",
272 "__version__",
273 "melt",
274 "crosstab",
275 "plotting",
276 "Interval",
277 "UInt8Dtype",
278 "UInt16Dtype",
279 "UInt32Dtype",
280 "UInt64Dtype",
281 "SparseDtype",
282 "Int8Dtype",
283 "Int16Dtype",
284 "Int32Dtype",
285 "Int64Dtype",
286 "CategoricalDtype",
287 "DatetimeTZDtype",
288 "IntervalDtype",
289 "PeriodDtype",
290 "BooleanDtype",
291 "StringDtype",
292 "NA",
293 "RangeIndex",
294 "Int64Index",
295 "UInt64Index",
296 "Float64Index",
297 "TimedeltaIndex",
298 "IntervalIndex",
299 "IndexSlice",
300 "Grouper",
301 "array",
302 "Period",
303 "show_versions",
304 "DateOffset",
305 "timedelta_range",
306 "infer_freq",
307 "interval_range",
308 "ExcelWriter",
309 "read_fwf",
310 "read_sql_table",
311 "read_sql_query",
312 "ExcelFile",
313 "to_pickle",
314 "HDFStore",
315 "lreshape",
316 "wide_to_long",
317 "merge_asof",
318 "merge_ordered",
319 "notnull",
320 "notna",
321 "pivot",
322 "to_numeric",
323 "unique",
324 "value_counts",
325 "datetime",
326 "NamedAgg",
327 "api",
328 ]
329
330 del pandas, Engine, Parameter
331
[end of modin/pandas/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py
--- a/modin/pandas/__init__.py
+++ b/modin/pandas/__init__.py
@@ -105,8 +105,22 @@
def _update_engine(publisher: Parameter):
global dask_client
from modin.config import StorageFormat, CpuCount
+ from modin.config.envvars import IsExperimental
+ from modin.config.pubsub import ValueSource
- if publisher.get() == "Ray":
+ if (
+ StorageFormat.get() == "Omnisci"
+ and publisher.get_value_source() == ValueSource.DEFAULT
+ ):
+ publisher.put("Native")
+ IsExperimental.put(True)
+ elif (
+ publisher.get() == "Native"
+ and StorageFormat.get_value_source() == ValueSource.DEFAULT
+ ):
+ StorageFormat.put("Omnisci")
+ IsExperimental.put(True)
+ elif publisher.get() == "Ray":
if _is_first_update.get("Ray", True):
from modin.core.execution.ray.common.utils import initialize_ray
| {"golden_diff": "diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py\n--- a/modin/pandas/__init__.py\n+++ b/modin/pandas/__init__.py\n@@ -105,8 +105,22 @@\n def _update_engine(publisher: Parameter):\n global dask_client\n from modin.config import StorageFormat, CpuCount\n+ from modin.config.envvars import IsExperimental\n+ from modin.config.pubsub import ValueSource\n \n- if publisher.get() == \"Ray\":\n+ if (\n+ StorageFormat.get() == \"Omnisci\"\n+ and publisher.get_value_source() == ValueSource.DEFAULT\n+ ):\n+ publisher.put(\"Native\")\n+ IsExperimental.put(True)\n+ elif (\n+ publisher.get() == \"Native\"\n+ and StorageFormat.get_value_source() == ValueSource.DEFAULT\n+ ):\n+ StorageFormat.put(\"Omnisci\")\n+ IsExperimental.put(True)\n+ elif publisher.get() == \"Ray\":\n if _is_first_update.get(\"Ray\", True):\n from modin.core.execution.ray.common.utils import initialize_ray\n", "issue": "Improve OmniSci \"quick start\" usability\nRight now to activate OmniSci you'd have to set both `Backend` to `Omnisci` and `Engine` to `native`.\r\nI think we should try to set `Engine` to `native` if it's unset by user while `Backend` is set to `Omnisci`.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\nimport pandas\n\n__pandas_version__ = \"1.3.4\"\n\nif pandas.__version__ != __pandas_version__:\n import warnings\n\n warnings.warn(\n \"The pandas version installed {} does not match the supported pandas version in\"\n \" Modin {}. 
This may cause undesired side effects!\".format(\n pandas.__version__, __pandas_version__\n )\n )\n\nfrom pandas import (\n eval,\n cut,\n factorize,\n test,\n qcut,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n options,\n Flags,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n Interval,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n SparseDtype,\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n Float32Dtype,\n Float64Dtype,\n StringDtype,\n BooleanDtype,\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n RangeIndex,\n Int64Index,\n UInt64Index,\n Float64Index,\n TimedeltaIndex,\n IntervalIndex,\n IndexSlice,\n Grouper,\n array,\n Period,\n show_versions,\n DateOffset,\n timedelta_range,\n infer_freq,\n interval_range,\n ExcelWriter,\n datetime,\n NamedAgg,\n NA,\n api,\n)\nimport os\nimport multiprocessing\n\nfrom modin.config import Engine, Parameter\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\n_is_first_update = {}\ndask_client = None\n_NOINIT_ENGINES = {\n \"Python\",\n} # engines that don't require initialization, useful for unit tests\n\n\ndef _update_engine(publisher: Parameter):\n global dask_client\n from modin.config import StorageFormat, CpuCount\n\n if publisher.get() == \"Ray\":\n if _is_first_update.get(\"Ray\", True):\n from modin.core.execution.ray.common.utils import initialize_ray\n\n initialize_ray()\n elif publisher.get() == \"Native\":\n # With OmniSci storage format there is only a single worker per node\n # and we allow it to work on all cores.\n if StorageFormat.get() == \"Omnisci\":\n os.environ[\"OMP_NUM_THREADS\"] = str(CpuCount.get())\n else:\n raise ValueError(\n f\"Storage format should be 'Omnisci' with 'Native' engine, but provided {StorageFormat.get()}.\"\n )\n elif publisher.get() == \"Dask\":\n if _is_first_update.get(\"Dask\", True):\n from modin.core.execution.dask.common.utils import initialize_dask\n\n initialize_dask()\n elif publisher.get() == \"Cloudray\":\n from modin.experimental.cloud import get_connection\n\n conn = get_connection()\n if _is_first_update.get(\"Cloudray\", True):\n\n @conn.teleport\n def init_remote_ray(partition):\n from ray import ray_constants\n import modin\n from modin.core.execution.ray.common.utils import initialize_ray\n\n modin.set_execution(\"Ray\", partition)\n initialize_ray(\n override_is_cluster=True,\n override_redis_address=f\"localhost:{ray_constants.DEFAULT_PORT}\",\n override_redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,\n )\n\n init_remote_ray(StorageFormat.get())\n # import FactoryDispatcher here to initialize IO class\n # so it doesn't skew read_csv() timings later on\n import modin.core.execution.dispatching.factories.dispatcher # noqa: F401\n else:\n get_connection().modules[\"modin\"].set_execution(\"Ray\", StorageFormat.get())\n elif publisher.get() == \"Cloudpython\":\n from modin.experimental.cloud import get_connection\n\n get_connection().modules[\"modin\"].set_execution(\"Python\")\n elif publisher.get() == \"Cloudnative\":\n from modin.experimental.cloud import get_connection\n\n assert (\n StorageFormat.get() == \"Omnisci\"\n ), f\"Storage format should be 'Omnisci' with 'Cloudnative' engine, but provided {StorageFormat.get()}.\"\n get_connection().modules[\"modin\"].set_execution(\"Native\", \"OmniSci\")\n\n elif publisher.get() not in _NOINIT_ENGINES:\n 
raise ImportError(\"Unrecognized execution engine: {}.\".format(publisher.get()))\n\n _is_first_update[publisher.get()] = False\n\n\nfrom .. import __version__\nfrom .dataframe import DataFrame\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n read_fwf,\n read_sql_table,\n read_sql_query,\n read_spss,\n ExcelFile,\n to_pickle,\n HDFStore,\n json_normalize,\n read_orc,\n read_xml,\n)\nfrom .series import Series\nfrom .general import (\n concat,\n isna,\n isnull,\n merge,\n merge_asof,\n merge_ordered,\n pivot_table,\n notnull,\n notna,\n pivot,\n to_numeric,\n to_datetime,\n unique,\n value_counts,\n get_dummies,\n melt,\n crosstab,\n lreshape,\n wide_to_long,\n)\nfrom .plotting import Plotting as plotting\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"read_spss\",\n \"read_orc\",\n \"json_normalize\",\n \"concat\",\n \"eval\",\n \"cut\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"options\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n \"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n \"Interval\",\n \"UInt8Dtype\",\n \"UInt16Dtype\",\n \"UInt32Dtype\",\n \"UInt64Dtype\",\n \"SparseDtype\",\n \"Int8Dtype\",\n \"Int16Dtype\",\n \"Int32Dtype\",\n \"Int64Dtype\",\n \"CategoricalDtype\",\n \"DatetimeTZDtype\",\n \"IntervalDtype\",\n \"PeriodDtype\",\n \"BooleanDtype\",\n \"StringDtype\",\n \"NA\",\n \"RangeIndex\",\n \"Int64Index\",\n \"UInt64Index\",\n \"Float64Index\",\n \"TimedeltaIndex\",\n \"IntervalIndex\",\n \"IndexSlice\",\n \"Grouper\",\n \"array\",\n \"Period\",\n \"show_versions\",\n \"DateOffset\",\n \"timedelta_range\",\n \"infer_freq\",\n \"interval_range\",\n \"ExcelWriter\",\n \"read_fwf\",\n \"read_sql_table\",\n \"read_sql_query\",\n \"ExcelFile\",\n \"to_pickle\",\n \"HDFStore\",\n \"lreshape\",\n \"wide_to_long\",\n \"merge_asof\",\n \"merge_ordered\",\n \"notnull\",\n \"notna\",\n \"pivot\",\n \"to_numeric\",\n \"unique\",\n \"value_counts\",\n \"datetime\",\n \"NamedAgg\",\n \"api\",\n]\n\ndel pandas, Engine, Parameter\n", "path": "modin/pandas/__init__.py"}]} | 3,505 | 250 |
gh_patches_debug_15688 | rasdani/github-patches | git_diff | iterative__dvc-796 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Use dvc to store images for windows installer in our repo
We currently have 3 images stored in git in scripts/innosetup. We should start using dvc for them. Required for https://github.com/iterative/dvc/issues/735 . Depends on https://github.com/iterative/dvc/issues/785 .
</issue>
<code>
[start of dvc/remote/base.py]
1 import os
2 import re
3 import tempfile
4
5 from dvc.config import Config
6 from dvc.logger import Logger
7 from dvc.exceptions import DvcException
8
9
10 STATUS_UNKNOWN = 0
11 STATUS_OK = 1
12 STATUS_MODIFIED = 2
13 STATUS_NEW = 3
14 STATUS_DELETED = 4
15
16
17 STATUS_MAP = {
18 # (local_exists, remote_exists, cmp)
19 (True, True, True) : STATUS_OK,
20 (True, True, False) : STATUS_MODIFIED,
21 (True, False, None) : STATUS_NEW,
22 (False, True, None) : STATUS_DELETED,
23 }
24
25
26 class DataCloudError(DvcException):
27 """ Data Cloud exception """
28 def __init__(self, msg):
29 super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))
30
31
32
33 class RemoteBase(object):
34 REGEX = None
35
36 def __init__(self, project, config):
37 pass
38
39 @classmethod
40 def supported(cls, config):
41 url = config[Config.SECTION_REMOTE_URL]
42 return cls.match(url) != None
43
44 @classmethod
45 def match(cls, url):
46 return re.match(cls.REGEX, url)
47
48 def save_info(self, path_info):
49 raise NotImplementedError
50
51 def save(self, path_info):
52 raise NotImplementedError
53
54 def checkout(self, path_info, checksum_info):
55 raise NotImplementedError
56
57 def download(self, path_info, path):
58 raise NotImplementedError
59
60 def upload(self, path, path_info):
61 raise NotImplementedError
62
63 # Old code starting from here
64
65 def cache_file_key(self, fname):
66 """ Key of a file within the bucket """
67 relpath = os.path.relpath(fname, self.project.cache.local.cache_dir)
68 relpath = relpath.replace('\\', '/')
69 return '{}/{}'.format(self.prefix, relpath).strip('/')
70
71 def cache_key_name(self, path):
72 relpath = os.path.relpath(path, self.project.cache.local.cache_dir)
73 return relpath.replace('\\', '').replace('/', '')
74
75 @staticmethod
76 def tmp_file(fname):
77 """ Temporary name for a partial download """
78 return fname + '.part'
79
80 def _push_key(self, key, path):
81 pass
82
83 def collect(self, arg):
84 from dvc.remote.local import RemoteLOCAL
85
86 path, local = arg
87 ret = [path]
88
89 if not RemoteLOCAL.is_dir_cache(path):
90 return ret
91
92 if local:
93 if not os.path.isfile(path):
94 return ret
95 dir_path = path
96 else:
97 key = self._get_key(path)
98 if not key:
99 Logger.debug("File '{}' does not exist in the cloud".format(path))
100 return ret
101 tmp = os.path.join(tempfile.mkdtemp(), os.path.basename(path))
102 self._pull_key(key, tmp, no_progress_bar=True)
103 dir_path = tmp
104
105 for relpath, md5 in RemoteLOCAL.get_dir_cache(dir_path).items():
106 cache = self.project.cache.local.get(md5)
107 ret.append(cache)
108
109 return ret
110
111 def _cmp_checksum(self, blob, fname):
112 md5 = self.project.cache.local.path_to_md5(fname)
113 if self.project.cache.local.state.changed(fname, md5=md5):
114 return False
115
116 return True
117
118 def push(self, path):
119 key = self._get_key(path)
120 if key:
121 Logger.debug("File '{}' already uploaded to the cloud. Validating checksum...".format(path))
122 if self._cmp_checksum(key, path):
123 Logger.debug('File checksum matches. No uploading is needed.')
124 return []
125 Logger.debug('Checksum mismatch. Reuploading is required.')
126
127 key = self._new_key(path)
128 return self._push_key(key, path)
129
130 def _makedirs(self, fname):
131 dname = os.path.dirname(fname)
132 try:
133 os.makedirs(dname)
134 except OSError as e:
135 if e.errno != os.errno.EEXIST:
136 raise
137
138 def _pull_key(self, key, path, no_progress_bar=False):
139 """ Cloud-specific method of pulling keys """
140 pass
141
142 def _get_key(self, path):
143 """ Cloud-specific method of getting keys """
144 pass
145
146 def pull(self, path):
147 """ Generic method for pulling data from the cloud """
148 key = self._get_key(path)
149 if not key:
150 Logger.error("File '{}' does not exist in the cloud".format(path))
151 return None
152
153 return self._pull_key(key, path)
154
155 def _status(self, key, path):
156 remote_exists = key != None
157 local_exists = os.path.exists(path)
158
159 diff = None
160 if remote_exists and local_exists:
161 diff = self._cmp_checksum(key, path)
162
163 return STATUS_MAP.get((local_exists, remote_exists, diff), STATUS_UNKNOWN)
164
165 def status(self, path):
166 """
167 Generic method for checking data item status.
168 """
169 key = self._get_key(path)
170 if not key:
171 return STATUS_NEW
172
173 return self._status(key, path)
174
175 def connect(self):
176 pass
177
178 def disconnect(self):
179 pass
180
181 def __enter__(self):
182 self.connect()
183
184 def __exit__(self, type, value, tb):
185 self.disconnect()
186
[end of dvc/remote/base.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/remote/base.py b/dvc/remote/base.py
--- a/dvc/remote/base.py
+++ b/dvc/remote/base.py
@@ -1,6 +1,7 @@
import os
import re
import tempfile
+import posixpath
from dvc.config import Config
from dvc.logger import Logger
@@ -66,7 +67,7 @@
""" Key of a file within the bucket """
relpath = os.path.relpath(fname, self.project.cache.local.cache_dir)
relpath = relpath.replace('\\', '/')
- return '{}/{}'.format(self.prefix, relpath).strip('/')
+ return posixpath.join(self.prefix, relpath).strip('/')
def cache_key_name(self, path):
relpath = os.path.relpath(path, self.project.cache.local.cache_dir)
| {"golden_diff": "diff --git a/dvc/remote/base.py b/dvc/remote/base.py\n--- a/dvc/remote/base.py\n+++ b/dvc/remote/base.py\n@@ -1,6 +1,7 @@\n import os\n import re\n import tempfile\n+import posixpath\n \n from dvc.config import Config\n from dvc.logger import Logger\n@@ -66,7 +67,7 @@\n \"\"\" Key of a file within the bucket \"\"\"\n relpath = os.path.relpath(fname, self.project.cache.local.cache_dir)\n relpath = relpath.replace('\\\\', '/')\n- return '{}/{}'.format(self.prefix, relpath).strip('/')\n+ return posixpath.join(self.prefix, relpath).strip('/')\n \n def cache_key_name(self, path):\n relpath = os.path.relpath(path, self.project.cache.local.cache_dir)\n", "issue": "Use dvc to store images for windows installer in our repo\nWe currently have 3 images stored in git in scripts/innosetup. We should start using dvc for them. Required for https://github.com/iterative/dvc/issues/735 . Depends on https://github.com/iterative/dvc/issues/785 .\n", "before_files": [{"content": "import os\nimport re\nimport tempfile\n\nfrom dvc.config import Config\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\n\n\nSTATUS_UNKNOWN = 0\nSTATUS_OK = 1\nSTATUS_MODIFIED = 2\nSTATUS_NEW = 3\nSTATUS_DELETED = 4\n\n\nSTATUS_MAP = {\n # (local_exists, remote_exists, cmp)\n (True, True, True) : STATUS_OK,\n (True, True, False) : STATUS_MODIFIED,\n (True, False, None) : STATUS_NEW,\n (False, True, None) : STATUS_DELETED,\n}\n\n\nclass DataCloudError(DvcException):\n \"\"\" Data Cloud exception \"\"\"\n def __init__(self, msg):\n super(DataCloudError, self).__init__('Data sync error: {}'.format(msg))\n\n\n\nclass RemoteBase(object):\n REGEX = None\n\n def __init__(self, project, config):\n pass\n\n @classmethod\n def supported(cls, config):\n url = config[Config.SECTION_REMOTE_URL]\n return cls.match(url) != None\n\n @classmethod\n def match(cls, url):\n return re.match(cls.REGEX, url)\n\n def save_info(self, path_info):\n raise NotImplementedError\n\n def save(self, path_info):\n raise NotImplementedError\n\n def checkout(self, path_info, checksum_info):\n raise NotImplementedError\n\n def download(self, path_info, path):\n raise NotImplementedError\n\n def upload(self, path, path_info):\n raise NotImplementedError\n\n # Old code starting from here\n\n def cache_file_key(self, fname):\n \"\"\" Key of a file within the bucket \"\"\"\n relpath = os.path.relpath(fname, self.project.cache.local.cache_dir)\n relpath = relpath.replace('\\\\', '/')\n return '{}/{}'.format(self.prefix, relpath).strip('/')\n\n def cache_key_name(self, path):\n relpath = os.path.relpath(path, self.project.cache.local.cache_dir)\n return relpath.replace('\\\\', '').replace('/', '')\n\n @staticmethod\n def tmp_file(fname):\n \"\"\" Temporary name for a partial download \"\"\"\n return fname + '.part'\n\n def _push_key(self, key, path):\n pass\n\n def collect(self, arg):\n from dvc.remote.local import RemoteLOCAL\n\n path, local = arg\n ret = [path]\n\n if not RemoteLOCAL.is_dir_cache(path):\n return ret\n\n if local:\n if not os.path.isfile(path):\n return ret\n dir_path = path\n else:\n key = self._get_key(path)\n if not key:\n Logger.debug(\"File '{}' does not exist in the cloud\".format(path))\n return ret\n tmp = os.path.join(tempfile.mkdtemp(), os.path.basename(path))\n self._pull_key(key, tmp, no_progress_bar=True)\n dir_path = tmp\n\n for relpath, md5 in RemoteLOCAL.get_dir_cache(dir_path).items():\n cache = self.project.cache.local.get(md5)\n ret.append(cache)\n\n return ret\n\n def _cmp_checksum(self, blob, 
fname):\n md5 = self.project.cache.local.path_to_md5(fname)\n if self.project.cache.local.state.changed(fname, md5=md5):\n return False\n\n return True\n\n def push(self, path):\n key = self._get_key(path)\n if key:\n Logger.debug(\"File '{}' already uploaded to the cloud. Validating checksum...\".format(path))\n if self._cmp_checksum(key, path):\n Logger.debug('File checksum matches. No uploading is needed.')\n return []\n Logger.debug('Checksum mismatch. Reuploading is required.')\n\n key = self._new_key(path)\n return self._push_key(key, path)\n\n def _makedirs(self, fname):\n dname = os.path.dirname(fname)\n try:\n os.makedirs(dname)\n except OSError as e:\n if e.errno != os.errno.EEXIST:\n raise\n\n def _pull_key(self, key, path, no_progress_bar=False):\n \"\"\" Cloud-specific method of pulling keys \"\"\"\n pass\n\n def _get_key(self, path):\n \"\"\" Cloud-specific method of getting keys \"\"\"\n pass\n\n def pull(self, path):\n \"\"\" Generic method for pulling data from the cloud \"\"\"\n key = self._get_key(path)\n if not key:\n Logger.error(\"File '{}' does not exist in the cloud\".format(path))\n return None\n\n return self._pull_key(key, path)\n\n def _status(self, key, path):\n remote_exists = key != None\n local_exists = os.path.exists(path)\n\n diff = None\n if remote_exists and local_exists:\n diff = self._cmp_checksum(key, path)\n\n return STATUS_MAP.get((local_exists, remote_exists, diff), STATUS_UNKNOWN)\n\n def status(self, path):\n \"\"\"\n Generic method for checking data item status.\n \"\"\"\n key = self._get_key(path)\n if not key:\n return STATUS_NEW\n\n return self._status(key, path)\n\n def connect(self):\n pass\n\n def disconnect(self):\n pass\n\n def __enter__(self):\n self.connect()\n\n def __exit__(self, type, value, tb):\n self.disconnect()\n", "path": "dvc/remote/base.py"}]} | 2,209 | 179 |
gh_patches_debug_27759 | rasdani/github-patches | git_diff | mdn__kuma-6029 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Search json endpoint is not available in new front-end
**Summary**
https://twitter.com/klaascuvelier/status/1182203293117886464
**Steps To Reproduce (STR)**
_How can we reproduce the problem?_
Go to https://developer.mozilla.org/en-US/search.json?q=array
**Actual behavior**
Blank page
**Expected behavior**
JSON is returned like it is now only at https://wiki.developer.mozilla.org/en-US/search.json?q=array
**Additional context**
There might be a few external services, twitter bots etc. that depend on this endpoint.
</issue>
<code>
[start of kuma/search/views.py]
1 from django.shortcuts import render
2 from django.views.decorators.cache import never_cache
3 from django.views.decorators.http import require_GET
4 from ratelimit.decorators import ratelimit
5
6 from kuma.api.v1.views import search as search_api
7 from kuma.core.decorators import shared_cache_control
8 from kuma.core.utils import is_wiki
9
10 from .search import SearchView
11
12 # Since the search endpoint accepts user input (via query parameters) and its
13 # response is compressed, use rate limiting to mitigate the BREACH attack
14 # (see http://breachattack.com/). It still needs to allow a user to click
15 # the filter switches (bug 1426968).
16 # Alternate: forbid gzip by setting Content-Encoding: identity
17 @never_cache
18 @require_GET
19 @ratelimit(key='user_or_ip', rate='25/m', block=True)
20 def search(request, *args, **kwargs):
21 """
22 The search view.
23 """
24 if is_wiki(request):
25 return wiki_search(request, *args, **kwargs)
26
27 results = search_api(request, *args, **kwargs).data
28 context = {
29 'results': {
30 'results': None if results.get('error') else results
31 }
32 }
33
34 return render(request, 'search/react.html', context)
35
36
37 wiki_search = SearchView.as_view()
38
39
40 @shared_cache_control(s_maxage=60 * 60 * 24 * 7)
41 def plugin(request):
42 """Render an OpenSearch Plugin."""
43 return render(request, 'search/plugin.html', {
44 'locale': request.LANGUAGE_CODE
45 }, content_type='application/opensearchdescription+xml')
46
[end of kuma/search/views.py]
[start of kuma/search/urls.py]
1 from django.conf.urls import url
2 from rest_framework.urlpatterns import format_suffix_patterns
3
4 from . import views
5
6 # this allows using ".json" extensions for the view to force json output
7 lang_base_urlpatterns = format_suffix_patterns(
8 [url(r'^$', views.search, name='search')])
9
10
11 lang_urlpatterns = [
12 url(r'^xml$',
13 views.plugin,
14 name='search.plugin'),
15 ]
16
[end of kuma/search/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kuma/search/urls.py b/kuma/search/urls.py
--- a/kuma/search/urls.py
+++ b/kuma/search/urls.py
@@ -1,11 +1,11 @@
from django.conf.urls import url
-from rest_framework.urlpatterns import format_suffix_patterns
from . import views
-# this allows using ".json" extensions for the view to force json output
-lang_base_urlpatterns = format_suffix_patterns(
- [url(r'^$', views.search, name='search')])
+lang_base_urlpatterns = [
+ url(r'^$', views.search, name='search'),
+ url(r'^.(?P<format>json)$', views.SearchRedirectView.as_view())
+]
lang_urlpatterns = [
diff --git a/kuma/search/views.py b/kuma/search/views.py
--- a/kuma/search/views.py
+++ b/kuma/search/views.py
@@ -1,6 +1,8 @@
from django.shortcuts import render
+from django.urls import reverse_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.http import require_GET
+from django.views.generic import RedirectView
from ratelimit.decorators import ratelimit
from kuma.api.v1.views import search as search_api
@@ -37,6 +39,17 @@
wiki_search = SearchView.as_view()
+class SearchRedirectView(RedirectView):
+ permanent = True
+
+ def get_redirect_url(self, *args, **kwargs):
+ query_string = self.request.META.get('QUERY_STRING')
+ url = reverse_lazy('api.v1.search', kwargs={'locale': self.request.LANGUAGE_CODE})
+ if query_string:
+ url += '?' + query_string
+ return url
+
+
@shared_cache_control(s_maxage=60 * 60 * 24 * 7)
def plugin(request):
"""Render an OpenSearch Plugin."""
| {"golden_diff": "diff --git a/kuma/search/urls.py b/kuma/search/urls.py\n--- a/kuma/search/urls.py\n+++ b/kuma/search/urls.py\n@@ -1,11 +1,11 @@\n from django.conf.urls import url\n-from rest_framework.urlpatterns import format_suffix_patterns\n \n from . import views\n \n-# this allows using \".json\" extensions for the view to force json output\n-lang_base_urlpatterns = format_suffix_patterns(\n- [url(r'^$', views.search, name='search')])\n+lang_base_urlpatterns = [\n+ url(r'^$', views.search, name='search'),\n+ url(r'^.(?P<format>json)$', views.SearchRedirectView.as_view())\n+]\n \n \n lang_urlpatterns = [\ndiff --git a/kuma/search/views.py b/kuma/search/views.py\n--- a/kuma/search/views.py\n+++ b/kuma/search/views.py\n@@ -1,6 +1,8 @@\n from django.shortcuts import render\n+from django.urls import reverse_lazy\n from django.views.decorators.cache import never_cache\n from django.views.decorators.http import require_GET\n+from django.views.generic import RedirectView\n from ratelimit.decorators import ratelimit\n \n from kuma.api.v1.views import search as search_api\n@@ -37,6 +39,17 @@\n wiki_search = SearchView.as_view()\n \n \n+class SearchRedirectView(RedirectView):\n+ permanent = True\n+\n+ def get_redirect_url(self, *args, **kwargs):\n+ query_string = self.request.META.get('QUERY_STRING')\n+ url = reverse_lazy('api.v1.search', kwargs={'locale': self.request.LANGUAGE_CODE})\n+ if query_string:\n+ url += '?' + query_string\n+ return url\n+\n+\n @shared_cache_control(s_maxage=60 * 60 * 24 * 7)\n def plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n", "issue": "Search json endpoint is not available in new front-end\n**Summary**\r\nhttps://twitter.com/klaascuvelier/status/1182203293117886464\r\n\r\n\r\n**Steps To Reproduce (STR)**\r\n_How can we reproduce the problem?_\r\n\r\nGo to https://developer.mozilla.org/en-US/search.json?q=array\r\n \r\n\r\n\r\n**Actual behavior**\r\nBlank page\r\n\r\n\r\n**Expected behavior**\r\nJSON is returned like it is now only at https://wiki.developer.mozilla.org/en-US/search.json?q=array\r\n\r\n\r\n**Additional context**\r\nThere might be a few external services, twitter bots etc. that depend on this endpoint.\r\n\n", "before_files": [{"content": "from django.shortcuts import render\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.views import search as search_api\nfrom kuma.core.decorators import shared_cache_control\nfrom kuma.core.utils import is_wiki\n\nfrom .search import SearchView\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). 
It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key='user_or_ip', rate='25/m', block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n \"\"\"\n if is_wiki(request):\n return wiki_search(request, *args, **kwargs)\n\n results = search_api(request, *args, **kwargs).data\n context = {\n 'results': {\n 'results': None if results.get('error') else results\n }\n }\n\n return render(request, 'search/react.html', context)\n\n\nwiki_search = SearchView.as_view()\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(request, 'search/plugin.html', {\n 'locale': request.LANGUAGE_CODE\n }, content_type='application/opensearchdescription+xml')\n", "path": "kuma/search/views.py"}, {"content": "from django.conf.urls import url\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom . import views\n\n# this allows using \".json\" extensions for the view to force json output\nlang_base_urlpatterns = format_suffix_patterns(\n [url(r'^$', views.search, name='search')])\n\n\nlang_urlpatterns = [\n url(r'^xml$',\n views.plugin,\n name='search.plugin'),\n]\n", "path": "kuma/search/urls.py"}]} | 1,239 | 408 |
gh_patches_debug_39489 | rasdani/github-patches | git_diff | deepset-ai__haystack-6822 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Implement function to calculate F1 metric
As specified in proposal #5794 we need to implement a function to calculate the F1 metric.
Ideally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_f1()` could be a nice name.
For more detailed information check out the original proposal.
</issue>
<code>
[start of haystack/evaluation/eval.py]
1 from typing import Any, Callable, Dict, List, Union
2
3 import numpy as np
4
5 from haystack import Pipeline
6 from haystack.core.component import Component
7 from haystack.evaluation.eval_utils import get_answers_from_output, preprocess_text
8 from haystack.evaluation.metrics import Metric, MetricsResult
9
10
11 class EvaluationResult:
12 """
13 EvaluationResult keeps track of all the information related to evaluation, namely the runnable (Pipeline or
14 component), inputs, outputs, and expected outputs.
15 The EvaluationResult keeps track of all the information stored by eval.
16
17 :param runnable: The runnable (Pipeline or component) used for evaluation.
18 :param inputs: List of inputs used for evaluation.
19 :param outputs: List of outputs generated by the runnable.
20 :param expected_outputs: List of expected outputs used for evaluation.
21 """
22
23 def __init__(
24 self,
25 runnable: Union[Pipeline, Component],
26 inputs: List[Dict[str, Any]],
27 outputs: List[Dict[str, Any]],
28 expected_outputs: List[Dict[str, Any]],
29 ) -> None:
30 self.runnable = runnable
31 self.inputs = inputs
32 self.outputs = outputs
33 self.expected_outputs = expected_outputs
34
35 # Determine the type of the runnable
36 if str(type(runnable).__name__) == "Pipeline":
37 self.runnable_type = "pipeline"
38 else:
39 self.runnable_type = "component"
40
41 # Mapping of metrics to their corresponding functions.
42 # This should be kept in sync with the Metric enum
43 self._supported_metrics: Dict[Metric, Callable[..., MetricsResult]] = {
44 Metric.RECALL: self._calculate_recall,
45 Metric.MRR: self._calculate_mrr,
46 Metric.MAP: self._calculate_map,
47 Metric.F1: self._calculate_f1,
48 Metric.EM: self._calculate_em,
49 Metric.SAS: self._calculate_sas,
50 }
51
52 def calculate_metrics(self, metric: Union[Metric, Callable[..., MetricsResult]], **kwargs) -> MetricsResult:
53 """
54 Calculate evaluation metrics based on the provided Metric or using the custom metric function.
55
56 :param metric: The Metric indicating the type of metric to calculate or custom function to compute.
57 :return: MetricsResult containing the calculated metric.
58 """
59
60 if isinstance(metric, Metric):
61 return self._supported_metrics[metric](**kwargs)
62
63 return metric(self, **kwargs)
64
65 def _calculate_recall(self):
66 return MetricsResult({"recall": None})
67
68 def _calculate_map(self):
69 return MetricsResult({"mean_average_precision": None})
70
71 def _calculate_mrr(self):
72 return MetricsResult({"mean_reciprocal_rank": None})
73
74 def _calculate_f1(self):
75 return MetricsResult({"f1": None})
76
77 def _calculate_em(
78 self, output_key: str, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False
79 ) -> MetricsResult:
80 """
81 Calculates the Exact Match (EM) score between two lists of predictions and labels.
82 Exact Match (EM) score measures the percentage of samples where the predicted text exactly matches the
83 corresponding ground truth label.
84
85 :param output_key: The key of the output to use for comparison.
86 :param regexes_to_ignore (list, optional): A list of regular expressions. If provided, it removes substrings
87 matching these regular expressions from both predictions and labels before comparison. Defaults to None.
88 :param ignore_case (bool, optional): If True, performs case-insensitive comparison. Defaults to False.
89 :param ignore_punctuation (bool, optional): If True, removes punctuation from both predictions and labels before
90 comparison. Defaults to False.
91 :param ignore_numbers (bool, optional): If True, removes numerical digits from both predictions and labels
92 before comparison. Defaults to False.
93 :return: A MetricsResult object containing the calculated Exact Match (EM) score.
94 """
95
96 predictions = get_answers_from_output(
97 outputs=self.outputs, output_key=output_key, runnable_type=self.runnable_type
98 )
99 labels = get_answers_from_output(
100 outputs=self.expected_outputs, output_key=output_key, runnable_type=self.runnable_type
101 )
102
103 if len(predictions) != len(labels):
104 raise ValueError("The number of predictions and labels must be the same.")
105 if len(predictions) == len(labels) == 0:
106 # Return Exact Match as 0 for no inputs
107 return MetricsResult({"exact_match": 0.0})
108
109 predictions = preprocess_text(predictions, regexes_to_ignore, ignore_case, ignore_punctuation, ignore_numbers)
110 labels = preprocess_text(labels, regexes_to_ignore, ignore_case, ignore_punctuation, ignore_numbers)
111
112 score_list = np.array(predictions) == np.array(labels)
113 exact_match_score = np.mean(score_list)
114
115 return MetricsResult({"exact_match": exact_match_score})
116
117 def _calculate_sas(self):
118 return MetricsResult({"exact_match": None})
119
120
121 def eval(
122 runnable: Union[Pipeline, Component], inputs: List[Dict[str, Any]], expected_outputs: List[Dict[str, Any]]
123 ) -> EvaluationResult:
124 """
125 Evaluates the provided Pipeline or component based on the given inputs and expected outputs.
126
127 This function facilitates the evaluation of a given runnable (either a Pipeline or a component) using the provided
128 inputs and corresponding expected outputs.
129
130 :param runnable: The runnable (Pipeline or component) used for evaluation.
131 :param inputs: List of inputs used for evaluation.
132 :param expected_outputs: List of expected outputs used for evaluation.
133
134 :return: An instance of EvaluationResult containing information about the evaluation, including the runnable,
135 inputs, outputs, and expected outputs.
136 """
137
138 outputs = []
139
140 # Check that expected outputs has the correct shape
141 if len(inputs) != len(expected_outputs):
142 raise ValueError(
143 f"The number of inputs ({len(inputs)}) does not match the number of expected outputs "
144 f"({len(expected_outputs)}). Please ensure that each input has a corresponding expected output."
145 )
146
147 for input_ in inputs:
148 output = runnable.run(input_)
149 outputs.append(output)
150
151 return EvaluationResult(runnable, inputs, outputs, expected_outputs)
152
[end of haystack/evaluation/eval.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/haystack/evaluation/eval.py b/haystack/evaluation/eval.py
--- a/haystack/evaluation/eval.py
+++ b/haystack/evaluation/eval.py
@@ -1,3 +1,4 @@
+import collections
from typing import Any, Callable, Dict, List, Union
import numpy as np
@@ -71,8 +72,68 @@
def _calculate_mrr(self):
return MetricsResult({"mean_reciprocal_rank": None})
- def _calculate_f1(self):
- return MetricsResult({"f1": None})
+ def _compute_f1_single(self, label_toks: List[str], pred_toks: List[str]) -> float:
+ """
+ Compute F1 score for a single sample.
+ """
+ common: collections.Counter = collections.Counter(label_toks) & collections.Counter(pred_toks)
+ num_same = sum(common.values())
+ if len(label_toks) == 0 or len(pred_toks) == 0:
+ # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
+ return int(label_toks == pred_toks)
+ if num_same == 0:
+ return 0
+ precision = 1.0 * num_same / len(pred_toks)
+ recall = 1.0 * num_same / len(label_toks)
+ f1 = (2 * precision * recall) / (precision + recall)
+ return f1
+
+ def _calculate_f1(
+ self, output_key: str, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False
+ ) -> MetricsResult:
+ """
+ Calculates the F1 score between two lists of predictions and labels.
+ F1 score measures the word overlap between the predicted text and the corresponding ground truth label.
+
+ :param output_key: The key of the output to use for comparison.
+ :param regexes_to_ignore (list, optional): A list of regular expressions. If provided, it removes substrings
+ matching these regular expressions from both predictions and labels before comparison. Defaults to None.
+ :param ignore_case (bool, optional): If True, performs case-insensitive comparison. Defaults to False.
+ :param ignore_punctuation (bool, optional): If True, removes punctuation from both predictions and labels before
+ comparison. Defaults to False.
+ :param ignore_numbers (bool, optional): If True, removes numerical digits from both predictions and labels
+ before comparison. Defaults to False.
+ :return: A MetricsResult object containing the calculated Exact Match (EM) score.
+ """
+
+ predictions = get_answers_from_output(
+ outputs=self.outputs, output_key=output_key, runnable_type=self.runnable_type
+ )
+ labels = get_answers_from_output(
+ outputs=self.expected_outputs, output_key=output_key, runnable_type=self.runnable_type
+ )
+
+ if len(predictions) != len(labels):
+ raise ValueError("The number of predictions and labels must be the same.")
+ if len(predictions) == len(labels) == 0:
+ # Return F1 as 0 for no inputs
+ return MetricsResult({"f1": 0.0})
+
+ predictions = preprocess_text(predictions, regexes_to_ignore, ignore_case, ignore_punctuation, ignore_numbers)
+ labels = preprocess_text(labels, regexes_to_ignore, ignore_case, ignore_punctuation, ignore_numbers)
+
+ # Tokenize by splitting on spaces
+ tokenized_predictions = [pred.split() for pred in predictions]
+ tokenized_labels = [label.split() for label in labels]
+
+ f1_scores = [
+ self._compute_f1_single(label_toks, pred_toks)
+ for label_toks, pred_toks in zip(tokenized_labels, tokenized_predictions)
+ ]
+
+ f1 = np.mean(f1_scores)
+
+ return MetricsResult({"f1": f1})
def _calculate_em(
self, output_key: str, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False
| {"golden_diff": "diff --git a/haystack/evaluation/eval.py b/haystack/evaluation/eval.py\n--- a/haystack/evaluation/eval.py\n+++ b/haystack/evaluation/eval.py\n@@ -1,3 +1,4 @@\n+import collections\n from typing import Any, Callable, Dict, List, Union\n \n import numpy as np\n@@ -71,8 +72,68 @@\n def _calculate_mrr(self):\n return MetricsResult({\"mean_reciprocal_rank\": None})\n \n- def _calculate_f1(self):\n- return MetricsResult({\"f1\": None})\n+ def _compute_f1_single(self, label_toks: List[str], pred_toks: List[str]) -> float:\n+ \"\"\"\n+ Compute F1 score for a single sample.\n+ \"\"\"\n+ common: collections.Counter = collections.Counter(label_toks) & collections.Counter(pred_toks)\n+ num_same = sum(common.values())\n+ if len(label_toks) == 0 or len(pred_toks) == 0:\n+ # If either is no-answer, then F1 is 1 if they agree, 0 otherwise\n+ return int(label_toks == pred_toks)\n+ if num_same == 0:\n+ return 0\n+ precision = 1.0 * num_same / len(pred_toks)\n+ recall = 1.0 * num_same / len(label_toks)\n+ f1 = (2 * precision * recall) / (precision + recall)\n+ return f1\n+\n+ def _calculate_f1(\n+ self, output_key: str, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False\n+ ) -> MetricsResult:\n+ \"\"\"\n+ Calculates the F1 score between two lists of predictions and labels.\n+ F1 score measures the word overlap between the predicted text and the corresponding ground truth label.\n+\n+ :param output_key: The key of the output to use for comparison.\n+ :param regexes_to_ignore (list, optional): A list of regular expressions. If provided, it removes substrings\n+ matching these regular expressions from both predictions and labels before comparison. Defaults to None.\n+ :param ignore_case (bool, optional): If True, performs case-insensitive comparison. Defaults to False.\n+ :param ignore_punctuation (bool, optional): If True, removes punctuation from both predictions and labels before\n+ comparison. Defaults to False.\n+ :param ignore_numbers (bool, optional): If True, removes numerical digits from both predictions and labels\n+ before comparison. 
Defaults to False.\n+ :return: A MetricsResult object containing the calculated Exact Match (EM) score.\n+ \"\"\"\n+\n+ predictions = get_answers_from_output(\n+ outputs=self.outputs, output_key=output_key, runnable_type=self.runnable_type\n+ )\n+ labels = get_answers_from_output(\n+ outputs=self.expected_outputs, output_key=output_key, runnable_type=self.runnable_type\n+ )\n+\n+ if len(predictions) != len(labels):\n+ raise ValueError(\"The number of predictions and labels must be the same.\")\n+ if len(predictions) == len(labels) == 0:\n+ # Return F1 as 0 for no inputs\n+ return MetricsResult({\"f1\": 0.0})\n+\n+ predictions = preprocess_text(predictions, regexes_to_ignore, ignore_case, ignore_punctuation, ignore_numbers)\n+ labels = preprocess_text(labels, regexes_to_ignore, ignore_case, ignore_punctuation, ignore_numbers)\n+\n+ # Tokenize by splitting on spaces\n+ tokenized_predictions = [pred.split() for pred in predictions]\n+ tokenized_labels = [label.split() for label in labels]\n+\n+ f1_scores = [\n+ self._compute_f1_single(label_toks, pred_toks)\n+ for label_toks, pred_toks in zip(tokenized_labels, tokenized_predictions)\n+ ]\n+\n+ f1 = np.mean(f1_scores)\n+\n+ return MetricsResult({\"f1\": f1})\n \n def _calculate_em(\n self, output_key: str, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False\n", "issue": "Implement function to calculate F1 metric\nAs specified in proposal #5794 we need to implement a function to calculate the F1 metric.\r\n\r\nIdeally the function should be part of the private interface and called only through the `calculate_metrics` function (see #6063). `_calculate_f1()` could be a nice name.\r\n\r\nFor more detailed information check out the original proposal.\n", "before_files": [{"content": "from typing import Any, Callable, Dict, List, Union\n\nimport numpy as np\n\nfrom haystack import Pipeline\nfrom haystack.core.component import Component\nfrom haystack.evaluation.eval_utils import get_answers_from_output, preprocess_text\nfrom haystack.evaluation.metrics import Metric, MetricsResult\n\n\nclass EvaluationResult:\n \"\"\"\n EvaluationResult keeps track of all the information related to evaluation, namely the runnable (Pipeline or\n component), inputs, outputs, and expected outputs.\n The EvaluationResult keeps track of all the information stored by eval.\n\n :param runnable: The runnable (Pipeline or component) used for evaluation.\n :param inputs: List of inputs used for evaluation.\n :param outputs: List of outputs generated by the runnable.\n :param expected_outputs: List of expected outputs used for evaluation.\n \"\"\"\n\n def __init__(\n self,\n runnable: Union[Pipeline, Component],\n inputs: List[Dict[str, Any]],\n outputs: List[Dict[str, Any]],\n expected_outputs: List[Dict[str, Any]],\n ) -> None:\n self.runnable = runnable\n self.inputs = inputs\n self.outputs = outputs\n self.expected_outputs = expected_outputs\n\n # Determine the type of the runnable\n if str(type(runnable).__name__) == \"Pipeline\":\n self.runnable_type = \"pipeline\"\n else:\n self.runnable_type = \"component\"\n\n # Mapping of metrics to their corresponding functions.\n # This should be kept in sync with the Metric enum\n self._supported_metrics: Dict[Metric, Callable[..., MetricsResult]] = {\n Metric.RECALL: self._calculate_recall,\n Metric.MRR: self._calculate_mrr,\n Metric.MAP: self._calculate_map,\n Metric.F1: self._calculate_f1,\n Metric.EM: self._calculate_em,\n Metric.SAS: self._calculate_sas,\n }\n\n def 
calculate_metrics(self, metric: Union[Metric, Callable[..., MetricsResult]], **kwargs) -> MetricsResult:\n \"\"\"\n Calculate evaluation metrics based on the provided Metric or using the custom metric function.\n\n :param metric: The Metric indicating the type of metric to calculate or custom function to compute.\n :return: MetricsResult containing the calculated metric.\n \"\"\"\n\n if isinstance(metric, Metric):\n return self._supported_metrics[metric](**kwargs)\n\n return metric(self, **kwargs)\n\n def _calculate_recall(self):\n return MetricsResult({\"recall\": None})\n\n def _calculate_map(self):\n return MetricsResult({\"mean_average_precision\": None})\n\n def _calculate_mrr(self):\n return MetricsResult({\"mean_reciprocal_rank\": None})\n\n def _calculate_f1(self):\n return MetricsResult({\"f1\": None})\n\n def _calculate_em(\n self, output_key: str, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False\n ) -> MetricsResult:\n \"\"\"\n Calculates the Exact Match (EM) score between two lists of predictions and labels.\n Exact Match (EM) score measures the percentage of samples where the predicted text exactly matches the\n corresponding ground truth label.\n\n :param output_key: The key of the output to use for comparison.\n :param regexes_to_ignore (list, optional): A list of regular expressions. If provided, it removes substrings\n matching these regular expressions from both predictions and labels before comparison. Defaults to None.\n :param ignore_case (bool, optional): If True, performs case-insensitive comparison. Defaults to False.\n :param ignore_punctuation (bool, optional): If True, removes punctuation from both predictions and labels before\n comparison. Defaults to False.\n :param ignore_numbers (bool, optional): If True, removes numerical digits from both predictions and labels\n before comparison. 
Defaults to False.\n :return: A MetricsResult object containing the calculated Exact Match (EM) score.\n \"\"\"\n\n predictions = get_answers_from_output(\n outputs=self.outputs, output_key=output_key, runnable_type=self.runnable_type\n )\n labels = get_answers_from_output(\n outputs=self.expected_outputs, output_key=output_key, runnable_type=self.runnable_type\n )\n\n if len(predictions) != len(labels):\n raise ValueError(\"The number of predictions and labels must be the same.\")\n if len(predictions) == len(labels) == 0:\n # Return Exact Match as 0 for no inputs\n return MetricsResult({\"exact_match\": 0.0})\n\n predictions = preprocess_text(predictions, regexes_to_ignore, ignore_case, ignore_punctuation, ignore_numbers)\n labels = preprocess_text(labels, regexes_to_ignore, ignore_case, ignore_punctuation, ignore_numbers)\n\n score_list = np.array(predictions) == np.array(labels)\n exact_match_score = np.mean(score_list)\n\n return MetricsResult({\"exact_match\": exact_match_score})\n\n def _calculate_sas(self):\n return MetricsResult({\"exact_match\": None})\n\n\ndef eval(\n runnable: Union[Pipeline, Component], inputs: List[Dict[str, Any]], expected_outputs: List[Dict[str, Any]]\n) -> EvaluationResult:\n \"\"\"\n Evaluates the provided Pipeline or component based on the given inputs and expected outputs.\n\n This function facilitates the evaluation of a given runnable (either a Pipeline or a component) using the provided\n inputs and corresponding expected outputs.\n\n :param runnable: The runnable (Pipeline or component) used for evaluation.\n :param inputs: List of inputs used for evaluation.\n :param expected_outputs: List of expected outputs used for evaluation.\n\n :return: An instance of EvaluationResult containing information about the evaluation, including the runnable,\n inputs, outputs, and expected outputs.\n \"\"\"\n\n outputs = []\n\n # Check that expected outputs has the correct shape\n if len(inputs) != len(expected_outputs):\n raise ValueError(\n f\"The number of inputs ({len(inputs)}) does not match the number of expected outputs \"\n f\"({len(expected_outputs)}). Please ensure that each input has a corresponding expected output.\"\n )\n\n for input_ in inputs:\n output = runnable.run(input_)\n outputs.append(output)\n\n return EvaluationResult(runnable, inputs, outputs, expected_outputs)\n", "path": "haystack/evaluation/eval.py"}]} | 2,284 | 912 |
gh_patches_debug_27319 | rasdani/github-patches | git_diff | medtagger__MedTagger-306 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow backend to return Slices in reverse order
## Expected Behavior
When user moves slider down, backend should send Slices in reverse order, so that UI will be able to show them first.
## Actual Behavior
Backend always sends Slices in ascending order.
## Steps to Reproduce the Problem
1. Go to the marker page.
2. Move to the bottom of current view (let's assume that the last Slice on which you are now has index N).
3. UI will request backend to send Slices from range (N-10, N-1).
4. Backend will send Slices **in order**: (N-10, N-9, N-8, ..., N-1).
5. Marker will add the (N-10)th Slice to the view from the above response.
6. Marker will allow user to move between all Slices in range from N-10 but Slices (N-9, N-8, ...) won't be loaded yet!
## Additional comment
Marker should request backend to send Slices in descending order, so that it will be able to load them to the marker first. Such case should be enabled **only** if user wants to go back/down!
To debug this case, slow your Internet connection down in your browser's dev tools or apply a huge load on the backend server.
</issue>
<code>
[start of backend/medtagger/api/scans/service_web_socket.py]
1 """Module responsible for definition of Scans service available via WebSockets."""
2 from typing import Dict
3
4 from flask_socketio import Namespace, emit
5
6 from medtagger.api import web_socket
7 from medtagger.database.models import SliceOrientation
8 from medtagger.types import ScanID
9 from medtagger.api.exceptions import InvalidArgumentsException
10 from medtagger.api.scans import business
11
12
13 class Slices(Namespace):
14 """WebSocket handler for /slices namespace."""
15
16 MAX_NUMBER_OF_SLICES_PER_REQUEST = 25
17
18 def on_request_slices(self, request: Dict) -> None:
19 """Handle slices request triggered by `request_slices` event."""
20 assert request.get('scan_id'), 'ScanID is required!'
21 scan_id = ScanID(str(request['scan_id']))
22 begin = max(0, request.get('begin', 0))
23 count = request.get('count', 1)
24 orientation = request.get('orientation', SliceOrientation.Z.value)
25 self._raise_on_invalid_request_slices(count, orientation)
26
27 orientation = SliceOrientation[orientation]
28 slices = business.get_slices_for_scan(scan_id, begin, count, orientation=orientation)
29 for index, (_slice, image) in enumerate(slices):
30 emit('slice', {'scan_id': scan_id, 'index': begin + index, 'image': image})
31
32 def _raise_on_invalid_request_slices(self, count: int, orientation: str) -> None:
33 """Validate incoming request and raise an exception if there are issues with given arguments.
34
35 :param count: number of slices that should be returned
36 :param orientation: Slice's orientation as a string
37 """
38 # Make sure that passed orientation is proper one
39 if orientation not in SliceOrientation.__members__:
40 raise InvalidArgumentsException('Invalid Slice orientation.')
41
42 # Make sure that nobody will fetch whole scan at once. It could freeze our backend application.
43 if count > self.MAX_NUMBER_OF_SLICES_PER_REQUEST:
44 message = 'Cannot return more than {} slices per request.'.format(self.MAX_NUMBER_OF_SLICES_PER_REQUEST)
45 raise InvalidArgumentsException(message)
46
47
48 # Register above namespace
49 web_socket.on_namespace(Slices('/slices'))
50
[end of backend/medtagger/api/scans/service_web_socket.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/backend/medtagger/api/scans/service_web_socket.py b/backend/medtagger/api/scans/service_web_socket.py
--- a/backend/medtagger/api/scans/service_web_socket.py
+++ b/backend/medtagger/api/scans/service_web_socket.py
@@ -21,13 +21,21 @@
scan_id = ScanID(str(request['scan_id']))
begin = max(0, request.get('begin', 0))
count = request.get('count', 1)
+ reversed_order = request.get('reversed', False)
orientation = request.get('orientation', SliceOrientation.Z.value)
self._raise_on_invalid_request_slices(count, orientation)
orientation = SliceOrientation[orientation]
slices = business.get_slices_for_scan(scan_id, begin, count, orientation=orientation)
- for index, (_slice, image) in enumerate(slices):
- emit('slice', {'scan_id': scan_id, 'index': begin + index, 'image': image})
+ slices_to_send = reversed(list(enumerate(slices))) if reversed_order else enumerate(slices)
+ last_in_batch = begin if reversed_order else begin + count - 1
+ for index, (_slice, image) in slices_to_send:
+ emit('slice', {
+ 'scan_id': scan_id,
+ 'index': begin + index,
+ 'last_in_batch': last_in_batch,
+ 'image': image,
+ })
def _raise_on_invalid_request_slices(self, count: int, orientation: str) -> None:
"""Validate incoming request and raise an exception if there are issues with given arguments.
| {"golden_diff": "diff --git a/backend/medtagger/api/scans/service_web_socket.py b/backend/medtagger/api/scans/service_web_socket.py\n--- a/backend/medtagger/api/scans/service_web_socket.py\n+++ b/backend/medtagger/api/scans/service_web_socket.py\n@@ -21,13 +21,21 @@\n scan_id = ScanID(str(request['scan_id']))\n begin = max(0, request.get('begin', 0))\n count = request.get('count', 1)\n+ reversed_order = request.get('reversed', False)\n orientation = request.get('orientation', SliceOrientation.Z.value)\n self._raise_on_invalid_request_slices(count, orientation)\n \n orientation = SliceOrientation[orientation]\n slices = business.get_slices_for_scan(scan_id, begin, count, orientation=orientation)\n- for index, (_slice, image) in enumerate(slices):\n- emit('slice', {'scan_id': scan_id, 'index': begin + index, 'image': image})\n+ slices_to_send = reversed(list(enumerate(slices))) if reversed_order else enumerate(slices)\n+ last_in_batch = begin if reversed_order else begin + count - 1\n+ for index, (_slice, image) in slices_to_send:\n+ emit('slice', {\n+ 'scan_id': scan_id,\n+ 'index': begin + index,\n+ 'last_in_batch': last_in_batch,\n+ 'image': image,\n+ })\n \n def _raise_on_invalid_request_slices(self, count: int, orientation: str) -> None:\n \"\"\"Validate incoming request and raise an exception if there are issues with given arguments.\n", "issue": "Allow backend to return Slices in reverse order\n## Expected Behavior\r\n\r\nWhen user moves slider down, backend should send Slices in reverse order, so that UI will be able to show them first.\r\n\r\n## Actual Behavior\r\n\r\nBackend always send Slices in ascending order.\r\n\r\n## Steps to Reproduce the Problem\r\n\r\n 1. Go to the marker page.\r\n 2. Move to the bottom of current view (let's assume that the last Slice on which you are now has index N).\r\n 3. UI will request backend to send Slices from range (N-10, N-1).\r\n 4. Backend will send Slices **in order**: (N-10, N-9, N-8, ..., N-1).\r\n 5. Marker will add (N-10)th Slice to the view from above response.\r\n 6. Marker will allow user to move between all Slices in range from N-10 but Slices (N-9, N-8, ...) won't be loaded yet!\r\n\r\n## Additional comment\r\n\r\nMarker should request backend to send Slices in descending order, so that it will be able to load them to the marker first. 
Such case should be enabled **only** if user wants to go back/down!\r\n\r\nTo debug this case, slow your Internet connection down in your browser's dev tools or apply huge load on the backend server.\n", "before_files": [{"content": "\"\"\"Module responsible for definition of Scans service available via WebSockets.\"\"\"\nfrom typing import Dict\n\nfrom flask_socketio import Namespace, emit\n\nfrom medtagger.api import web_socket\nfrom medtagger.database.models import SliceOrientation\nfrom medtagger.types import ScanID\nfrom medtagger.api.exceptions import InvalidArgumentsException\nfrom medtagger.api.scans import business\n\n\nclass Slices(Namespace):\n \"\"\"WebSocket handler for /slices namespace.\"\"\"\n\n MAX_NUMBER_OF_SLICES_PER_REQUEST = 25\n\n def on_request_slices(self, request: Dict) -> None:\n \"\"\"Handle slices request triggered by `request_slices` event.\"\"\"\n assert request.get('scan_id'), 'ScanID is required!'\n scan_id = ScanID(str(request['scan_id']))\n begin = max(0, request.get('begin', 0))\n count = request.get('count', 1)\n orientation = request.get('orientation', SliceOrientation.Z.value)\n self._raise_on_invalid_request_slices(count, orientation)\n\n orientation = SliceOrientation[orientation]\n slices = business.get_slices_for_scan(scan_id, begin, count, orientation=orientation)\n for index, (_slice, image) in enumerate(slices):\n emit('slice', {'scan_id': scan_id, 'index': begin + index, 'image': image})\n\n def _raise_on_invalid_request_slices(self, count: int, orientation: str) -> None:\n \"\"\"Validate incoming request and raise an exception if there are issues with given arguments.\n\n :param count: number of slices that should be returned\n :param orientation: Slice's orientation as a string\n \"\"\"\n # Make sure that passed orientation is proper one\n if orientation not in SliceOrientation.__members__:\n raise InvalidArgumentsException('Invalid Slice orientation.')\n\n # Make sure that nobody will fetch whole scan at once. It could freeze our backend application.\n if count > self.MAX_NUMBER_OF_SLICES_PER_REQUEST:\n message = 'Cannot return more than {} slices per request.'.format(self.MAX_NUMBER_OF_SLICES_PER_REQUEST)\n raise InvalidArgumentsException(message)\n\n\n# Register above namespace\nweb_socket.on_namespace(Slices('/slices'))\n", "path": "backend/medtagger/api/scans/service_web_socket.py"}]} | 1,394 | 357 |
gh_patches_debug_23887 | rasdani/github-patches | git_diff | iterative__dvc-1047 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Custom credentialpath not working
Credentialpath is not working with the following config:
```
[core]
remote = models
cloud = AWS
['remote "models"']
url = s3://<bucket-name>
credentialpath = /home/ubuntu/Models/.dvc/aws_credentials
```
Works with the default path:
`credentialpath = /home/ubuntu/.aws/credentials`
The credentials files are identical.
Setup:
DVC: 0.17.0
Installation: pip3 install dvc[all]
OS: Ubuntu 16.04.4
</issue>
<code>
[start of dvc/remote/s3.py]
1 import os
2 import threading
3 import posixpath
4
5 try:
6 import boto3
7 except ImportError:
8 boto3 = None
9
10 try:
11 from urlparse import urlparse
12 except ImportError:
13 from urllib.parse import urlparse
14
15 from dvc.logger import Logger
16 from dvc.progress import progress
17 from dvc.config import Config
18 from dvc.remote.base import RemoteBase
19 from dvc.remote.local import RemoteLOCAL
20 from dvc.exceptions import DvcException
21
22
23 class Callback(object):
24 def __init__(self, name, total):
25 self.name = name
26 self.total = total
27 self.current = 0
28 self.lock = threading.Lock()
29
30 def __call__(self, byts):
31 with self.lock:
32 self.current += byts
33 progress.update_target(self.name, self.current, self.total)
34
35
36 class RemoteS3(RemoteBase):
37 scheme = 's3'
38 REGEX = r'^s3://(?P<path>.*)$'
39 REQUIRES = {'boto3': boto3}
40 PARAM_ETAG = 'etag'
41
42 def __init__(self, project, config):
43 import configobj
44
45 self.project = project
46 storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH,
47 '').lstrip('/')
48 self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)
49 self.region = config.get(Config.SECTION_AWS_REGION, None)
50 self.profile = config.get(Config.SECTION_AWS_PROFILE, 'default')
51 self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)
52
53 credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)
54 if credentialpath:
55 creds_conf = configobj.ConfigObj(credentialpath)
56 creds = creds_conf.get(self.profile, {})
57 else:
58 creds = {}
59
60 self.region = creds.get('region', self.region)
61 self.aws_access_key_id = creds.get('aws_access_key_id', None)
62 self.aws_secret_access_key = creds.get('aws_secret_access_key', None)
63
64 @property
65 def bucket(self):
66 return urlparse(self.url).netloc
67
68 @property
69 def prefix(self):
70 return urlparse(self.url).path.lstrip('/')
71
72 @property
73 def s3(self):
74 session = boto3.session.Session(profile_name=self.profile)
75 return session.client('s3',
76 aws_access_key_id=self.aws_access_key_id,
77 aws_secret_access_key=self.aws_secret_access_key,
78 region_name=self.region,
79 endpoint_url=self.endpoint_url)
80
81 def get_etag(self, bucket, key):
82 try:
83 obj = self.s3.head_object(Bucket=bucket, Key=key)
84 except Exception:
85 raise DvcException('s3://{}/{} does not exist'.format(bucket, key))
86
87 return obj['ETag'].strip('"')
88
89 def save_info(self, path_info):
90 if path_info['scheme'] != 's3':
91 raise NotImplementedError
92
93 return {self.PARAM_ETAG: self.get_etag(path_info['bucket'],
94 path_info['key'])}
95
96 def changed(self, path_info, checksum_info):
97 if not self.exists([path_info])[0]:
98 return True
99
100 etag = checksum_info.get(self.PARAM_ETAG, None)
101 if etag is None:
102 return True
103
104 if self.changed_cache(etag):
105 return True
106
107 return checksum_info != self.save_info(path_info)
108
109 def _copy(self, from_info, to_info, s3=None):
110 s3 = s3 if s3 else self.s3
111
112 source = {'Bucket': from_info['bucket'],
113 'Key': from_info['key']}
114 self.s3.copy(source, to_info['bucket'], to_info['key'])
115
116 def save(self, path_info):
117 if path_info['scheme'] != 's3':
118 raise NotImplementedError
119
120 etag = self.get_etag(path_info['bucket'], path_info['key'])
121 key = posixpath.join(self.prefix, etag[0:2], etag[2:])
122 to_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}
123
124 self._copy(path_info, to_info)
125
126 return {self.PARAM_ETAG: etag}
127
128 @staticmethod
129 def to_string(path_info):
130 return "s3://{}/{}".format(path_info['bucket'], path_info['key'])
131
132 def changed_cache(self, etag):
133 key = posixpath.join(self.prefix, etag[0:2], etag[2:])
134 cache = {'scheme': 's3', 'bucket': self.bucket, 'key': key}
135
136 if {self.PARAM_ETAG: etag} != self.save_info(cache):
137 if self.exists([cache])[0]:
138 msg = 'Corrupted cache file {}'
139 Logger.warn(msg.format(self.to_string(cache)))
140 self.remove(cache)
141 return True
142
143 return False
144
145 def checkout(self, path_info, checksum_info):
146 if path_info['scheme'] != 's3':
147 raise NotImplementedError
148
149 etag = checksum_info.get(self.PARAM_ETAG, None)
150 if not etag:
151 return
152
153 if not self.changed(path_info, checksum_info):
154 msg = "Data '{}' didn't change."
155 Logger.info(msg.format(self.to_string(path_info)))
156 return
157
158 if self.changed_cache(etag):
159 msg = "Cache '{}' not found. File '{}' won't be created."
160 Logger.warn(msg.format(etag, self.to_string(path_info)))
161 return
162
163 if self.exists([path_info])[0]:
164 msg = "Data '{}' exists. Removing before checkout."
165 Logger.warn(msg.format(self.to_string(path_info)))
166 self.remove(path_info)
167 return
168
169 msg = "Checking out '{}' with cache '{}'."
170 Logger.info(msg.format(self.to_string(path_info), etag))
171
172 key = posixpath.join(self.prefix, etag[0:2], etag[2:])
173 from_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}
174
175 self._copy(from_info, path_info)
176
177 def remove(self, path_info):
178 if path_info['scheme'] != 's3':
179 raise NotImplementedError
180
181 Logger.debug('Removing s3://{}/{}'.format(path_info['bucket'],
182 path_info['key']))
183
184 self.s3.delete_object(Bucket=path_info['bucket'],
185 Key=path_info['key'])
186
187 def md5s_to_path_infos(self, md5s):
188 return [{'scheme': self.scheme,
189 'bucket': self.bucket,
190 'key': posixpath.join(self.prefix,
191 md5[0:2], md5[2:])} for md5 in md5s]
192
193 def _all_keys(self):
194 s3 = self.s3
195
196 keys = []
197 kwargs = {'Bucket': self.bucket,
198 'Prefix': self.prefix}
199 while True:
200 resp = s3.list_objects_v2(**kwargs)
201 contents = resp.get('Contents', None)
202 if not contents:
203 break
204
205 for obj in contents:
206 keys.append(obj['Key'])
207
208 token = resp.get('NextContinuationToken', None)
209 if not token:
210 break
211
212 kwargs['ContinuationToken'] = token
213
214 return keys
215
216 def exists(self, path_infos):
217 # NOTE: We mostly use exists() method when filtering a bulk of cache
218 # files to decide if we need to download/upload them and in s3
219 # list_objects_v2() is much-much faster than trying to check keys
220 # one-by-one.
221 ret = []
222
223 keys = self._all_keys()
224
225 for path_info in path_infos:
226 exists = False
227 if path_info['key'] in keys:
228 exists = True
229 ret.append(exists)
230
231 return ret
232
233 def upload(self, from_infos, to_infos, names=None):
234 names = self._verify_path_args(to_infos, from_infos, names)
235
236 s3 = self.s3
237
238 for from_info, to_info, name in zip(from_infos, to_infos, names):
239 if to_info['scheme'] != 's3':
240 raise NotImplementedError
241
242 if from_info['scheme'] != 'local':
243 raise NotImplementedError
244
245 Logger.debug("Uploading '{}' to '{}/{}'".format(from_info['path'],
246 to_info['bucket'],
247 to_info['key']))
248
249 if not name:
250 name = os.path.basename(from_info['path'])
251
252 total = os.path.getsize(from_info['path'])
253 cb = Callback(name, total)
254
255 try:
256 s3.upload_file(from_info['path'],
257 to_info['bucket'],
258 to_info['key'],
259 Callback=cb)
260 except Exception as exc:
261 msg = "Failed to upload '{}'".format(from_info['path'])
262 Logger.error(msg, exc)
263 continue
264
265 progress.finish_target(name)
266
267 def download(self,
268 from_infos,
269 to_infos,
270 no_progress_bar=False,
271 names=None):
272 names = self._verify_path_args(from_infos, to_infos, names)
273
274 s3 = self.s3
275
276 for to_info, from_info, name in zip(to_infos, from_infos, names):
277 if from_info['scheme'] != 's3':
278 raise NotImplementedError
279
280 if to_info['scheme'] == 's3':
281 self._copy(from_info, to_info, s3=s3)
282 continue
283
284 if to_info['scheme'] != 'local':
285 raise NotImplementedError
286
287 msg = "Downloading '{}/{}' to '{}'".format(from_info['bucket'],
288 from_info['key'],
289 to_info['path'])
290 Logger.debug(msg)
291
292 tmp_file = self.tmp_file(to_info['path'])
293 if not name:
294 name = os.path.basename(to_info['path'])
295
296 if no_progress_bar:
297 cb = None
298 else:
299 total = s3.head_object(Bucket=from_info['bucket'],
300 Key=from_info['key'])['ContentLength']
301 cb = Callback(name, total)
302
303 self._makedirs(to_info['path'])
304
305 try:
306 s3.download_file(from_info['bucket'],
307 from_info['key'],
308 tmp_file,
309 Callback=cb)
310 except Exception as exc:
311 msg = "Failed to download '{}/{}'".format(from_info['bucket'],
312 from_info['key'])
313 Logger.error(msg, exc)
314 return
315
316 os.rename(tmp_file, to_info['path'])
317
318 if not no_progress_bar:
319 progress.finish_target(name)
320
321 def _path_to_etag(self, path):
322 relpath = posixpath.relpath(path, self.prefix)
323 return posixpath.dirname(relpath) + posixpath.basename(relpath)
324
325 def _all(self):
326 keys = self._all_keys()
327 return [self._path_to_etag(key) for key in keys]
328
329 def gc(self, cinfos):
330 used_etags = [info[self.PARAM_ETAG] for info in cinfos['s3']]
331 used_etags += [info[RemoteLOCAL.PARAM_MD5] for info in cinfos['local']]
332
333 removed = False
334 for etag in self._all():
335 if etag in used_etags:
336 continue
337 path_info = {'scheme': 's3',
338 'key': posixpath.join(self.prefix,
339 etag[0:2], etag[2:]),
340 'bucket': self.bucket}
341 self.remove(path_info)
342 removed = True
343
344 return removed
345
[end of dvc/remote/s3.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/remote/s3.py b/dvc/remote/s3.py
--- a/dvc/remote/s3.py
+++ b/dvc/remote/s3.py
@@ -57,6 +57,7 @@
else:
creds = {}
+ self.creds = creds
self.region = creds.get('region', self.region)
self.aws_access_key_id = creds.get('aws_access_key_id', None)
self.aws_secret_access_key = creds.get('aws_secret_access_key', None)
@@ -71,12 +72,14 @@
@property
def s3(self):
- session = boto3.session.Session(profile_name=self.profile)
- return session.client('s3',
- aws_access_key_id=self.aws_access_key_id,
- aws_secret_access_key=self.aws_secret_access_key,
- region_name=self.region,
- endpoint_url=self.endpoint_url)
+ if not self.creds:
+ session = boto3.session.Session(profile_name=self.profile)
+ else:
+ session = boto3.session.Session(
+ aws_access_key_id=self.aws_access_key_id,
+ aws_secret_access_key=self.aws_secret_access_key,
+ region_name=self.region)
+ return session.client('s3', endpoint_url=self.endpoint_url)
def get_etag(self, bucket, key):
try:
| {"golden_diff": "diff --git a/dvc/remote/s3.py b/dvc/remote/s3.py\n--- a/dvc/remote/s3.py\n+++ b/dvc/remote/s3.py\n@@ -57,6 +57,7 @@\n else:\n creds = {}\n \n+ self.creds = creds\n self.region = creds.get('region', self.region)\n self.aws_access_key_id = creds.get('aws_access_key_id', None)\n self.aws_secret_access_key = creds.get('aws_secret_access_key', None)\n@@ -71,12 +72,14 @@\n \n @property\n def s3(self):\n- session = boto3.session.Session(profile_name=self.profile)\n- return session.client('s3',\n- aws_access_key_id=self.aws_access_key_id,\n- aws_secret_access_key=self.aws_secret_access_key,\n- region_name=self.region,\n- endpoint_url=self.endpoint_url)\n+ if not self.creds:\n+ session = boto3.session.Session(profile_name=self.profile)\n+ else:\n+ session = boto3.session.Session(\n+ aws_access_key_id=self.aws_access_key_id,\n+ aws_secret_access_key=self.aws_secret_access_key,\n+ region_name=self.region)\n+ return session.client('s3', endpoint_url=self.endpoint_url)\n \n def get_etag(self, bucket, key):\n try:\n", "issue": "Custom credentialpath not working\nCredentialpath is not working with the following config:\r\n\r\n```\r\n[core]\r\nremote = models\r\ncloud = AWS\r\n['remote \"models\"']\r\nurl = s3://<bucket-name>\r\ncredentialpath = /home/ubuntu/Models/.dvc/aws_credentials\r\n```\r\n\r\nWorks with the default path:\r\n\r\n`credentialpath = /home/ubuntu/.aws/credentials`\r\n\r\nThe credentials files are identical.\r\n\r\nSetup:\r\n\r\nDVC: 0.17.0\r\nInstallation: pip3 install dvc[all]\r\nOS: Ubuntu 16.04.4\n", "before_files": [{"content": "import os\nimport threading\nimport posixpath\n\ntry:\n import boto3\nexcept ImportError:\n boto3 = None\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.logger import Logger\nfrom dvc.progress import progress\nfrom dvc.config import Config\nfrom dvc.remote.base import RemoteBase\nfrom dvc.remote.local import RemoteLOCAL\nfrom dvc.exceptions import DvcException\n\n\nclass Callback(object):\n def __init__(self, name, total):\n self.name = name\n self.total = total\n self.current = 0\n self.lock = threading.Lock()\n\n def __call__(self, byts):\n with self.lock:\n self.current += byts\n progress.update_target(self.name, self.current, self.total)\n\n\nclass RemoteS3(RemoteBase):\n scheme = 's3'\n REGEX = r'^s3://(?P<path>.*)$'\n REQUIRES = {'boto3': boto3}\n PARAM_ETAG = 'etag'\n\n def __init__(self, project, config):\n import configobj\n\n self.project = project\n storagepath = 's3://' + config.get(Config.SECTION_AWS_STORAGEPATH,\n '').lstrip('/')\n self.url = config.get(Config.SECTION_REMOTE_URL, storagepath)\n self.region = config.get(Config.SECTION_AWS_REGION, None)\n self.profile = config.get(Config.SECTION_AWS_PROFILE, 'default')\n self.endpoint_url = config.get(Config.SECTION_AWS_ENDPOINT_URL, None)\n\n credentialpath = config.get(Config.SECTION_AWS_CREDENTIALPATH, None)\n if credentialpath:\n creds_conf = configobj.ConfigObj(credentialpath)\n creds = creds_conf.get(self.profile, {})\n else:\n creds = {}\n\n self.region = creds.get('region', self.region)\n self.aws_access_key_id = creds.get('aws_access_key_id', None)\n self.aws_secret_access_key = creds.get('aws_secret_access_key', None)\n\n @property\n def bucket(self):\n return urlparse(self.url).netloc\n\n @property\n def prefix(self):\n return urlparse(self.url).path.lstrip('/')\n\n @property\n def s3(self):\n session = boto3.session.Session(profile_name=self.profile)\n return session.client('s3',\n 
aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n region_name=self.region,\n endpoint_url=self.endpoint_url)\n\n def get_etag(self, bucket, key):\n try:\n obj = self.s3.head_object(Bucket=bucket, Key=key)\n except Exception:\n raise DvcException('s3://{}/{} does not exist'.format(bucket, key))\n\n return obj['ETag'].strip('\"')\n\n def save_info(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n return {self.PARAM_ETAG: self.get_etag(path_info['bucket'],\n path_info['key'])}\n\n def changed(self, path_info, checksum_info):\n if not self.exists([path_info])[0]:\n return True\n\n etag = checksum_info.get(self.PARAM_ETAG, None)\n if etag is None:\n return True\n\n if self.changed_cache(etag):\n return True\n\n return checksum_info != self.save_info(path_info)\n\n def _copy(self, from_info, to_info, s3=None):\n s3 = s3 if s3 else self.s3\n\n source = {'Bucket': from_info['bucket'],\n 'Key': from_info['key']}\n self.s3.copy(source, to_info['bucket'], to_info['key'])\n\n def save(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n etag = self.get_etag(path_info['bucket'], path_info['key'])\n key = posixpath.join(self.prefix, etag[0:2], etag[2:])\n to_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}\n\n self._copy(path_info, to_info)\n\n return {self.PARAM_ETAG: etag}\n\n @staticmethod\n def to_string(path_info):\n return \"s3://{}/{}\".format(path_info['bucket'], path_info['key'])\n\n def changed_cache(self, etag):\n key = posixpath.join(self.prefix, etag[0:2], etag[2:])\n cache = {'scheme': 's3', 'bucket': self.bucket, 'key': key}\n\n if {self.PARAM_ETAG: etag} != self.save_info(cache):\n if self.exists([cache])[0]:\n msg = 'Corrupted cache file {}'\n Logger.warn(msg.format(self.to_string(cache)))\n self.remove(cache)\n return True\n\n return False\n\n def checkout(self, path_info, checksum_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n etag = checksum_info.get(self.PARAM_ETAG, None)\n if not etag:\n return\n\n if not self.changed(path_info, checksum_info):\n msg = \"Data '{}' didn't change.\"\n Logger.info(msg.format(self.to_string(path_info)))\n return\n\n if self.changed_cache(etag):\n msg = \"Cache '{}' not found. File '{}' won't be created.\"\n Logger.warn(msg.format(etag, self.to_string(path_info)))\n return\n\n if self.exists([path_info])[0]:\n msg = \"Data '{}' exists. 
Removing before checkout.\"\n Logger.warn(msg.format(self.to_string(path_info)))\n self.remove(path_info)\n return\n\n msg = \"Checking out '{}' with cache '{}'.\"\n Logger.info(msg.format(self.to_string(path_info), etag))\n\n key = posixpath.join(self.prefix, etag[0:2], etag[2:])\n from_info = {'scheme': 's3', 'bucket': self.bucket, 'key': key}\n\n self._copy(from_info, path_info)\n\n def remove(self, path_info):\n if path_info['scheme'] != 's3':\n raise NotImplementedError\n\n Logger.debug('Removing s3://{}/{}'.format(path_info['bucket'],\n path_info['key']))\n\n self.s3.delete_object(Bucket=path_info['bucket'],\n Key=path_info['key'])\n\n def md5s_to_path_infos(self, md5s):\n return [{'scheme': self.scheme,\n 'bucket': self.bucket,\n 'key': posixpath.join(self.prefix,\n md5[0:2], md5[2:])} for md5 in md5s]\n\n def _all_keys(self):\n s3 = self.s3\n\n keys = []\n kwargs = {'Bucket': self.bucket,\n 'Prefix': self.prefix}\n while True:\n resp = s3.list_objects_v2(**kwargs)\n contents = resp.get('Contents', None)\n if not contents:\n break\n\n for obj in contents:\n keys.append(obj['Key'])\n\n token = resp.get('NextContinuationToken', None)\n if not token:\n break\n\n kwargs['ContinuationToken'] = token\n\n return keys\n\n def exists(self, path_infos):\n # NOTE: We mostly use exists() method when filtering a bulk of cache\n # files to decide if we need to download/upload them and in s3\n # list_objects_v2() is much-much faster than trying to check keys\n # one-by-one.\n ret = []\n\n keys = self._all_keys()\n\n for path_info in path_infos:\n exists = False\n if path_info['key'] in keys:\n exists = True\n ret.append(exists)\n\n return ret\n\n def upload(self, from_infos, to_infos, names=None):\n names = self._verify_path_args(to_infos, from_infos, names)\n\n s3 = self.s3\n\n for from_info, to_info, name in zip(from_infos, to_infos, names):\n if to_info['scheme'] != 's3':\n raise NotImplementedError\n\n if from_info['scheme'] != 'local':\n raise NotImplementedError\n\n Logger.debug(\"Uploading '{}' to '{}/{}'\".format(from_info['path'],\n to_info['bucket'],\n to_info['key']))\n\n if not name:\n name = os.path.basename(from_info['path'])\n\n total = os.path.getsize(from_info['path'])\n cb = Callback(name, total)\n\n try:\n s3.upload_file(from_info['path'],\n to_info['bucket'],\n to_info['key'],\n Callback=cb)\n except Exception as exc:\n msg = \"Failed to upload '{}'\".format(from_info['path'])\n Logger.error(msg, exc)\n continue\n\n progress.finish_target(name)\n\n def download(self,\n from_infos,\n to_infos,\n no_progress_bar=False,\n names=None):\n names = self._verify_path_args(from_infos, to_infos, names)\n\n s3 = self.s3\n\n for to_info, from_info, name in zip(to_infos, from_infos, names):\n if from_info['scheme'] != 's3':\n raise NotImplementedError\n\n if to_info['scheme'] == 's3':\n self._copy(from_info, to_info, s3=s3)\n continue\n\n if to_info['scheme'] != 'local':\n raise NotImplementedError\n\n msg = \"Downloading '{}/{}' to '{}'\".format(from_info['bucket'],\n from_info['key'],\n to_info['path'])\n Logger.debug(msg)\n\n tmp_file = self.tmp_file(to_info['path'])\n if not name:\n name = os.path.basename(to_info['path'])\n\n if no_progress_bar:\n cb = None\n else:\n total = s3.head_object(Bucket=from_info['bucket'],\n Key=from_info['key'])['ContentLength']\n cb = Callback(name, total)\n\n self._makedirs(to_info['path'])\n\n try:\n s3.download_file(from_info['bucket'],\n from_info['key'],\n tmp_file,\n Callback=cb)\n except Exception as exc:\n msg = \"Failed to download 
'{}/{}'\".format(from_info['bucket'],\n from_info['key'])\n Logger.error(msg, exc)\n return\n\n os.rename(tmp_file, to_info['path'])\n\n if not no_progress_bar:\n progress.finish_target(name)\n\n def _path_to_etag(self, path):\n relpath = posixpath.relpath(path, self.prefix)\n return posixpath.dirname(relpath) + posixpath.basename(relpath)\n\n def _all(self):\n keys = self._all_keys()\n return [self._path_to_etag(key) for key in keys]\n\n def gc(self, cinfos):\n used_etags = [info[self.PARAM_ETAG] for info in cinfos['s3']]\n used_etags += [info[RemoteLOCAL.PARAM_MD5] for info in cinfos['local']]\n\n removed = False\n for etag in self._all():\n if etag in used_etags:\n continue\n path_info = {'scheme': 's3',\n 'key': posixpath.join(self.prefix,\n etag[0:2], etag[2:]),\n 'bucket': self.bucket}\n self.remove(path_info)\n removed = True\n\n return removed\n", "path": "dvc/remote/s3.py"}]} | 4,068 | 295 |
gh_patches_debug_18774 | rasdani/github-patches | git_diff | kornia__kornia-1284 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Importing solve function from PyTorch fails for edge case version
### Describe the bug
Hi there,
We're running the PyTorch version labeled `1.7.1+cu110`, but it is giving us issues when importing Kornia because of the following check in `_compat.py`:
```
if version.parse(torch.__version__) > version.parse("1.7.1"):
# TODO: remove the type: ignore once Python 3.6 is deprecated.
# It turns out that Pytorch has no attribute `torch.linalg` for
# Python 3.6 / PyTorch 1.7.0, 1.7.1
from torch.linalg import solve # type: ignore
else:
from torch import solve as _solve
# NOTE: in previous versions `torch.solve` accepted arguments in another order.
def solve(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
return _solve(B, A).solution
```
This check appears to believe that our version is > 1.7.1 and therefore attempts to import the wrong `solve` function.
### Reproduction steps
```bash
1. Get PyTorch 1.7.1+anything (cu110, cu101, cu92, cpu)
2. Attempt to import Kornia
```
### Expected behavior
Importing Kornia with this version should not fail on a PyTorch version check. Perhaps it would make sense to split on `+` before checking the version.
### Environment
```shell
- PyTorch Version (e.g., 1.0): 1.7.1+cu110
- OS (e.g., Linux): Ubuntu 20.04
- How you installed PyTorch (`conda`, `pip`, source): `pip`
- Build command you used (if compiling from source): -
- Python version: 3.7.5
- CUDA/cuDNN version: -
- GPU models and configuration: -
- Any other relevant information: -
```
### Additional context
_No response_
</issue>
<code>
[start of setup.py]
1 # Welcome to the Kornia setup.py.
2 #
3 import distutils.command.clean
4 import glob
5 import io
6 import os
7 import shutil
8 import subprocess
9 import sys
10
11 from setuptools import find_packages, setup
12
13 ################
14 # The variables below define the current version under
15 # development and the current pytorch supported versions.
16 # WARNING: Becareful and do not touch those variables,
17 # unless you are a maintainer. Otherwise, could brake
18 # the package backward compatibility.
19
20 # NOTE(maintainers): modify this variable each time you do a release
21
22 version = '0.5.10' # this a tag for the current development version
23
24
25 # NOTE(maintainers): update this dictionary each time you do a release
26 # When multiple pytorch versions are associated with a single version of kornia,
27 # the oldest one is the requirement. The versions should be inequalities.
28 # Once a pytorch version (in the future) breaks a kornia version, we could just
29 # add a maximal version.
30 kornia_pt_dependencies = {
31 '0.5.10': '>=1.6.0',
32 '0.5.9': '>=1.6.0',
33 '0.5.8': '>=1.6.0',
34 '0.5.7': '>=1.6.0',
35 '0.5.6': '>=1.6.0',
36 '0.5.5': '>=1.6.0',
37 '0.5.4': '>=1.6.0',
38 '0.5.3': '>=1.6.0',
39 '0.5.2': '>=1.6.0',
40 '0.5.1': '>=1.6.0',
41 '0.5.0': '>=1.6.0',
42 '0.4.2': '>=1.5.1',
43 '0.4.1': '>=1.6.0',
44 '0.4.0': '>=1.6.0,<1.7.0',
45 '0.3.2': '>=1.5.0,<1.6.0',
46 '0.3.1': '>=1.5.0',
47 '0.2.2': '>=1.4.0',
48 '0.1.4': '>=1.2.0',
49 }
50
51
52 # version can be overridden eg with KORNIA_BUILD_VERSION so we map each possible kornia version to the dictionary keys
53 def dep_version(version):
54 compatible_versions = [v for v in kornia_pt_dependencies if v >= version]
55 compatible_versions += [sorted(kornia_pt_dependencies)[-1]]
56 return min(compatible_versions)
57
58
59 #################################
60
61 sha = 'Unknown'
62 package_name = 'kornia'
63
64 cwd = os.path.dirname(os.path.abspath(__file__))
65
66 try:
67 sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
68 except subprocess.CalledProcessError:
69 pass
70
71 if os.getenv('KORNIA_BUILD_VERSION'):
72 version = os.getenv('KORNIA_BUILD_VERSION')
73 elif os.getenv('KORNIA_RELEASE'):
74 pass
75 elif sha != 'Unknown':
76 version += '+' + sha[:7]
77 print(f"Building wheel {package_name}-{version}")
78
79
80 def write_version_file():
81 version_path = os.path.join(cwd, 'kornia', 'version.py')
82 with open(version_path, 'w') as f:
83 f.write(f"__version__ = '{version}'\n")
84 f.write(f"git_version = {repr(sha)}\n")
85
86
87 def read(*names, **kwargs):
88 with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")) as fp:
89 return fp.read()
90
91
92 # open readme file and set long description
93 with open("README.md", encoding="utf-8") as fh:
94 long_description = fh.read()
95
96
97 class clean(distutils.command.clean.clean):
98 def run(self):
99 with open('.gitignore') as f:
100 ignores = f.read()
101 for wildcard in filter(None, ignores.split('\n')):
102 for filename in glob.glob(wildcard):
103 try:
104 os.remove(filename)
105 except OSError:
106 shutil.rmtree(filename, ignore_errors=True)
107
108 # It's an old-style class in Python 2.7...
109 distutils.command.clean.clean.run(self)
110
111 # remove compiled and temporary files
112 subprocess.call(['rm -rf dist/ build/ kornia.egg*'], shell=True)
113
114
115 requirements = ['torch' + kornia_pt_dependencies[dep_version(version)]]
116
117
118 if __name__ == '__main__':
119 write_version_file()
120 setup(
121 # Metadata
122 name=package_name,
123 version=version,
124 author='Edgar Riba',
125 author_email='[email protected]',
126 url='https://www.kornia.org',
127 download_url='https://github.com/kornia/kornia',
128 license='Apache License 2.0',
129 description='Open Source Differentiable Computer Vision Library for PyTorch',
130 long_description=long_description,
131 long_description_content_type='text/markdown',
132 python_requires='>=3.6',
133 setup_requires=['pytest-runner'],
134 tests_require=['pytest'],
135 packages=find_packages(exclude=('docs', 'test', 'examples')),
136 package_data={"kornia": ["py.typed"]},
137 zip_safe=True,
138 install_requires=requirements,
139 keywords=['computer vision', 'deep learning', 'pytorch'],
140 project_urls={
141 "Bug Tracker": "https://github.com/kornia/kornia/issues",
142 "Documentation": "https://kornia.readthedocs.io/en/latest",
143 "Source Code": "https://github.com/kornia/kornia",
144 },
145 classifiers=[
146 'Environment :: GPU',
147 'Environment :: Console',
148 'Natural Language :: English',
149 # How mature is this project? Common values are
150 # 3 - Alpha, 4 - Beta, 5 - Production/Stable
151 'Development Status :: 4 - Beta',
152 # Indicate who your project is intended for
153 'Intended Audience :: Developers',
154 'Intended Audience :: Education',
155 'Intended Audience :: Science/Research',
156 'Intended Audience :: Information Technology',
157 'Topic :: Software Development :: Libraries',
158 'Topic :: Scientific/Engineering :: Artificial Intelligence',
159 'Topic :: Scientific/Engineering :: Image Processing',
160 # Pick your license as you wish
161 'License :: OSI Approved :: Apache Software License',
162 'Operating System :: OS Independent',
163 # Specify the Python versions you support here. In particular, ensure
164 # that you indicate whether you support Python 2, Python 3 or both.
165 'Programming Language :: Python :: 3',
166 'Programming Language :: Python :: 3.6',
167 'Programming Language :: Python :: 3.7',
168 'Programming Language :: Python :: 3.8',
169 ],
170 )
171
[end of setup.py]
[start of kornia/utils/_compat.py]
1 import torch
2
3 from packaging import version
4
5 if version.parse(torch.__version__) > version.parse("1.7.1"):
6 # TODO: remove the type: ignore once Python 3.6 is deprecated.
7 # It turns out that Pytorch has no attribute `torch.linalg` for
8 # Python 3.6 / PyTorch 1.7.0, 1.7.1
9 from torch.linalg import solve # type: ignore
10 else:
11 from torch import solve as _solve
12
13 # NOTE: in previous versions `torch.solve` accepted arguments in another order.
14 def solve(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
15 return _solve(B, A).solution
16
[end of kornia/utils/_compat.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kornia/utils/_compat.py b/kornia/utils/_compat.py
--- a/kornia/utils/_compat.py
+++ b/kornia/utils/_compat.py
@@ -2,7 +2,13 @@
from packaging import version
-if version.parse(torch.__version__) > version.parse("1.7.1"):
+
+def torch_version() -> str:
+ """Parse the `torch.__version__` variable and removes +cu*/cpu."""
+ return torch.__version__.split('+')[0]
+
+
+if version.parse(torch_version()) > version.parse("1.7.1"):
# TODO: remove the type: ignore once Python 3.6 is deprecated.
# It turns out that Pytorch has no attribute `torch.linalg` for
# Python 3.6 / PyTorch 1.7.0, 1.7.1
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -112,7 +112,10 @@
subprocess.call(['rm -rf dist/ build/ kornia.egg*'], shell=True)
-requirements = ['torch' + kornia_pt_dependencies[dep_version(version)]]
+requirements = [
+ 'torch' + kornia_pt_dependencies[dep_version(version)],
+ 'packaging' # REMOVE once we deprecate pytorch > 1.7.1. See: issue #1264
+]
if __name__ == '__main__':
| {"golden_diff": "diff --git a/kornia/utils/_compat.py b/kornia/utils/_compat.py\n--- a/kornia/utils/_compat.py\n+++ b/kornia/utils/_compat.py\n@@ -2,7 +2,13 @@\n \n from packaging import version\n \n-if version.parse(torch.__version__) > version.parse(\"1.7.1\"):\n+\n+def torch_version() -> str:\n+ \"\"\"Parse the `torch.__version__` variable and removes +cu*/cpu.\"\"\"\n+ return torch.__version__.split('+')[0]\n+\n+\n+if version.parse(torch_version()) > version.parse(\"1.7.1\"):\n # TODO: remove the type: ignore once Python 3.6 is deprecated.\n # It turns out that Pytorch has no attribute `torch.linalg` for\n # Python 3.6 / PyTorch 1.7.0, 1.7.1\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -112,7 +112,10 @@\n subprocess.call(['rm -rf dist/ build/ kornia.egg*'], shell=True)\n \n \n-requirements = ['torch' + kornia_pt_dependencies[dep_version(version)]]\n+requirements = [\n+ 'torch' + kornia_pt_dependencies[dep_version(version)],\n+ 'packaging' # REMOVE once we deprecate pytorch > 1.7.1. See: issue #1264\n+]\n \n \n if __name__ == '__main__':\n", "issue": "Importing solve function from PyTorch fails for edge case version \n### Describe the bug\r\n\r\nHi there,\r\n\r\nWe're running the PyTorch version labeled `1.7.1+cu110`, but it is giving us issues when importing Kornia because of the following check in `_compat.py`:\r\n\r\n```\r\nif version.parse(torch.__version__) > version.parse(\"1.7.1\"):\r\n # TODO: remove the type: ignore once Python 3.6 is deprecated.\r\n # It turns out that Pytorch has no attribute `torch.linalg` for\r\n # Python 3.6 / PyTorch 1.7.0, 1.7.1\r\n from torch.linalg import solve # type: ignore\r\nelse:\r\n from torch import solve as _solve\r\n\r\n # NOTE: in previous versions `torch.solve` accepted arguments in another order.\r\n def solve(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:\r\n return _solve(B, A).solution\r\n```\r\n\r\nThis check appears to believe that our version is > 1.7.1 and therefore attempts to import the wrong `solve` function.\r\n\r\n\r\n### Reproduction steps\r\n\r\n```bash\r\n1. Get PyTorch 1.7.1+anything (cu110, cu101, cu92, cpu) \r\n2. Attempt to import Kornia\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nImporting Kornia with this version should not fail on a PyTorch version check. Perhaps it would make sense to split on `+` before checking the version.\r\n\r\n### Environment\r\n\r\n```shell\r\n- PyTorch Version (e.g., 1.0): 1.7.1+cu110\r\n- OS (e.g., Linux): Ubuntu 20.04\r\n- How you installed PyTorch (`conda`, `pip`, source): `pip`\r\n- Build command you used (if compiling from source): -\r\n- Python version: 3.7.5\r\n- CUDA/cuDNN version: -\r\n- GPU models and configuration: - \r\n- Any other relevant information: -\r\n```\r\n\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "# Welcome to the Kornia setup.py.\n#\nimport distutils.command.clean\nimport glob\nimport io\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nfrom setuptools import find_packages, setup\n\n################\n# The variables below define the current version under\n# development and the current pytorch supported versions.\n# WARNING: Becareful and do not touch those variables,\n# unless you are a maintainer. 
Otherwise, could brake\n# the package backward compatibility.\n\n# NOTE(maintainers): modify this variable each time you do a release\n\nversion = '0.5.10' # this a tag for the current development version\n\n\n# NOTE(maintainers): update this dictionary each time you do a release\n# When multiple pytorch versions are associated with a single version of kornia,\n# the oldest one is the requirement. The versions should be inequalities.\n# Once a pytorch version (in the future) breaks a kornia version, we could just\n# add a maximal version.\nkornia_pt_dependencies = {\n '0.5.10': '>=1.6.0',\n '0.5.9': '>=1.6.0',\n '0.5.8': '>=1.6.0',\n '0.5.7': '>=1.6.0',\n '0.5.6': '>=1.6.0',\n '0.5.5': '>=1.6.0',\n '0.5.4': '>=1.6.0',\n '0.5.3': '>=1.6.0',\n '0.5.2': '>=1.6.0',\n '0.5.1': '>=1.6.0',\n '0.5.0': '>=1.6.0',\n '0.4.2': '>=1.5.1',\n '0.4.1': '>=1.6.0',\n '0.4.0': '>=1.6.0,<1.7.0',\n '0.3.2': '>=1.5.0,<1.6.0',\n '0.3.1': '>=1.5.0',\n '0.2.2': '>=1.4.0',\n '0.1.4': '>=1.2.0',\n}\n\n\n# version can be overridden eg with KORNIA_BUILD_VERSION so we map each possible kornia version to the dictionary keys\ndef dep_version(version):\n compatible_versions = [v for v in kornia_pt_dependencies if v >= version]\n compatible_versions += [sorted(kornia_pt_dependencies)[-1]]\n return min(compatible_versions)\n\n\n#################################\n\nsha = 'Unknown'\npackage_name = 'kornia'\n\ncwd = os.path.dirname(os.path.abspath(__file__))\n\ntry:\n sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()\nexcept subprocess.CalledProcessError:\n pass\n\nif os.getenv('KORNIA_BUILD_VERSION'):\n version = os.getenv('KORNIA_BUILD_VERSION')\nelif os.getenv('KORNIA_RELEASE'):\n pass\nelif sha != 'Unknown':\n version += '+' + sha[:7]\nprint(f\"Building wheel {package_name}-{version}\")\n\n\ndef write_version_file():\n version_path = os.path.join(cwd, 'kornia', 'version.py')\n with open(version_path, 'w') as f:\n f.write(f\"__version__ = '{version}'\\n\")\n f.write(f\"git_version = {repr(sha)}\\n\")\n\n\ndef read(*names, **kwargs):\n with open(os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get(\"encoding\", \"utf8\")) as fp:\n return fp.read()\n\n\n# open readme file and set long description\nwith open(\"README.md\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\n\nclass clean(distutils.command.clean.clean):\n def run(self):\n with open('.gitignore') as f:\n ignores = f.read()\n for wildcard in filter(None, ignores.split('\\n')):\n for filename in glob.glob(wildcard):\n try:\n os.remove(filename)\n except OSError:\n shutil.rmtree(filename, ignore_errors=True)\n\n # It's an old-style class in Python 2.7...\n distutils.command.clean.clean.run(self)\n\n # remove compiled and temporary files\n subprocess.call(['rm -rf dist/ build/ kornia.egg*'], shell=True)\n\n\nrequirements = ['torch' + kornia_pt_dependencies[dep_version(version)]]\n\n\nif __name__ == '__main__':\n write_version_file()\n setup(\n # Metadata\n name=package_name,\n version=version,\n author='Edgar Riba',\n author_email='[email protected]',\n url='https://www.kornia.org',\n download_url='https://github.com/kornia/kornia',\n license='Apache License 2.0',\n description='Open Source Differentiable Computer Vision Library for PyTorch',\n long_description=long_description,\n long_description_content_type='text/markdown',\n python_requires='>=3.6',\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n packages=find_packages(exclude=('docs', 'test', 'examples')),\n 
package_data={\"kornia\": [\"py.typed\"]},\n zip_safe=True,\n install_requires=requirements,\n keywords=['computer vision', 'deep learning', 'pytorch'],\n project_urls={\n \"Bug Tracker\": \"https://github.com/kornia/kornia/issues\",\n \"Documentation\": \"https://kornia.readthedocs.io/en/latest\",\n \"Source Code\": \"https://github.com/kornia/kornia\",\n },\n classifiers=[\n 'Environment :: GPU',\n 'Environment :: Console',\n 'Natural Language :: English',\n # How mature is this project? Common values are\n # 3 - Alpha, 4 - Beta, 5 - Production/Stable\n 'Development Status :: 4 - Beta',\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Information Technology',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Image Processing',\n # Pick your license as you wish\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n )\n", "path": "setup.py"}, {"content": "import torch\n\nfrom packaging import version\n\nif version.parse(torch.__version__) > version.parse(\"1.7.1\"):\n # TODO: remove the type: ignore once Python 3.6 is deprecated.\n # It turns out that Pytorch has no attribute `torch.linalg` for\n # Python 3.6 / PyTorch 1.7.0, 1.7.1\n from torch.linalg import solve # type: ignore\nelse:\n from torch import solve as _solve\n\n # NOTE: in previous versions `torch.solve` accepted arguments in another order.\n def solve(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:\n return _solve(B, A).solution\n", "path": "kornia/utils/_compat.py"}]} | 3,116 | 332 |
gh_patches_debug_23334 | rasdani/github-patches | git_diff | NVIDIA__NVFlare-318 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unable to run poc command if nvflare is installed by pip install -e .
</issue>
<code>
[start of nvflare/lighter/poc.py]
1 # Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import argparse
16 import os
17 import pathlib
18 import shutil
19
20
21 def clone_client(num_clients: int):
22 current_path = os.getcwd()
23 poc_folder = os.path.join(current_path, "poc")
24 src_folder = os.path.join(poc_folder, "client")
25 for index in range(1, num_clients + 1):
26 dst_folder = os.path.join(poc_folder, f"site-{index}")
27 shutil.copytree(src_folder, dst_folder)
28 start_sh = open(os.path.join(dst_folder, "startup", "start.sh"), "rt")
29 content = start_sh.read()
30 start_sh.close()
31 content = content.replace("NNN", f"{index}")
32 with open(os.path.join(dst_folder, "startup", "start.sh"), "wt") as f:
33 f.write(content)
34 shutil.rmtree(src_folder)
35
36
37 def main():
38 parser = argparse.ArgumentParser()
39 parser.add_argument("-n", "--num_clients", type=int, default=1, help="number of client folders to create")
40
41 args = parser.parse_args()
42
43 file_dir_path = pathlib.Path(__file__).parent.absolute()
44 poc_zip_path = file_dir_path.parent / "poc.zip"
45 answer = input("This will delete poc folder in current directory and create a new one. Is it OK to proceed? (y/N) ")
46 if answer.strip().upper() == "Y":
47 dest_poc_folder = os.path.join(os.getcwd(), "poc")
48 shutil.rmtree(dest_poc_folder, ignore_errors=True)
49 shutil.unpack_archive(poc_zip_path)
50 for root, dirs, files in os.walk(dest_poc_folder):
51 for file in files:
52 if file.endswith(".sh"):
53 os.chmod(os.path.join(root, file), 0o755)
54 clone_client(args.num_clients)
55 print("Successfully creating poc folder. Please read poc/Readme.rst for user guide.")
56
57
58 if __name__ == "__main__":
59 main()
60
[end of nvflare/lighter/poc.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nvflare/lighter/poc.py b/nvflare/lighter/poc.py
--- a/nvflare/lighter/poc.py
+++ b/nvflare/lighter/poc.py
@@ -42,11 +42,20 @@
file_dir_path = pathlib.Path(__file__).parent.absolute()
poc_zip_path = file_dir_path.parent / "poc.zip"
+ poc_folder_path = file_dir_path.parent / "poc"
answer = input("This will delete poc folder in current directory and create a new one. Is it OK to proceed? (y/N) ")
if answer.strip().upper() == "Y":
dest_poc_folder = os.path.join(os.getcwd(), "poc")
shutil.rmtree(dest_poc_folder, ignore_errors=True)
- shutil.unpack_archive(poc_zip_path)
+ try:
+ shutil.unpack_archive(poc_zip_path)
+ except shutil.ReadError:
+ print(f"poc.zip not found at {poc_zip_path}, try to use template poc folder")
+ try:
+ shutil.copytree(poc_folder_path, dest_poc_folder)
+ except BaseException:
+ print(f"Unable to copy poc folder from {poc_folder_path}. Exit")
+ exit(1)
for root, dirs, files in os.walk(dest_poc_folder):
for file in files:
if file.endswith(".sh"):
| {"golden_diff": "diff --git a/nvflare/lighter/poc.py b/nvflare/lighter/poc.py\n--- a/nvflare/lighter/poc.py\n+++ b/nvflare/lighter/poc.py\n@@ -42,11 +42,20 @@\n \n file_dir_path = pathlib.Path(__file__).parent.absolute()\n poc_zip_path = file_dir_path.parent / \"poc.zip\"\n+ poc_folder_path = file_dir_path.parent / \"poc\"\n answer = input(\"This will delete poc folder in current directory and create a new one. Is it OK to proceed? (y/N) \")\n if answer.strip().upper() == \"Y\":\n dest_poc_folder = os.path.join(os.getcwd(), \"poc\")\n shutil.rmtree(dest_poc_folder, ignore_errors=True)\n- shutil.unpack_archive(poc_zip_path)\n+ try:\n+ shutil.unpack_archive(poc_zip_path)\n+ except shutil.ReadError:\n+ print(f\"poc.zip not found at {poc_zip_path}, try to use template poc folder\")\n+ try:\n+ shutil.copytree(poc_folder_path, dest_poc_folder)\n+ except BaseException:\n+ print(f\"Unable to copy poc folder from {poc_folder_path}. Exit\")\n+ exit(1)\n for root, dirs, files in os.walk(dest_poc_folder):\n for file in files:\n if file.endswith(\".sh\"):\n", "issue": "Unable to run poc command if nvflare is installed by pip install -e .\n\n", "before_files": [{"content": "# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport os\nimport pathlib\nimport shutil\n\n\ndef clone_client(num_clients: int):\n current_path = os.getcwd()\n poc_folder = os.path.join(current_path, \"poc\")\n src_folder = os.path.join(poc_folder, \"client\")\n for index in range(1, num_clients + 1):\n dst_folder = os.path.join(poc_folder, f\"site-{index}\")\n shutil.copytree(src_folder, dst_folder)\n start_sh = open(os.path.join(dst_folder, \"startup\", \"start.sh\"), \"rt\")\n content = start_sh.read()\n start_sh.close()\n content = content.replace(\"NNN\", f\"{index}\")\n with open(os.path.join(dst_folder, \"startup\", \"start.sh\"), \"wt\") as f:\n f.write(content)\n shutil.rmtree(src_folder)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-n\", \"--num_clients\", type=int, default=1, help=\"number of client folders to create\")\n\n args = parser.parse_args()\n\n file_dir_path = pathlib.Path(__file__).parent.absolute()\n poc_zip_path = file_dir_path.parent / \"poc.zip\"\n answer = input(\"This will delete poc folder in current directory and create a new one. Is it OK to proceed? (y/N) \")\n if answer.strip().upper() == \"Y\":\n dest_poc_folder = os.path.join(os.getcwd(), \"poc\")\n shutil.rmtree(dest_poc_folder, ignore_errors=True)\n shutil.unpack_archive(poc_zip_path)\n for root, dirs, files in os.walk(dest_poc_folder):\n for file in files:\n if file.endswith(\".sh\"):\n os.chmod(os.path.join(root, file), 0o755)\n clone_client(args.num_clients)\n print(\"Successfully creating poc folder. Please read poc/Readme.rst for user guide.\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "nvflare/lighter/poc.py"}]} | 1,229 | 305 |
gh_patches_debug_60482 | rasdani/github-patches | git_diff | mars-project__mars-679 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
[BUG] Web session doesn't work with large data source
**Describe the bug**
The default `max_buffer_size` of the Tornado HTTP server is 100 MB; when constructing dataframes from large pandas dataframes, it raises `Content-Length too long`.
</issue>
<code>
[start of mars/web/server.py]
1 # Copyright 1999-2018 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import functools
16 import json
17 import logging
18 import threading
19 import os
20 from collections import defaultdict
21
22 import numpy as np
23 import pyarrow
24 from bokeh.application import Application
25 from bokeh.application.handlers import FunctionHandler
26 from bokeh.server.server import Server
27 import jinja2
28 from tornado import web, ioloop
29
30 from ..compat import six
31 from ..utils import get_next_port
32 from ..scheduler import ResourceActor, SessionActor
33 from ..api import MarsAPI
34
35 logger = logging.getLogger(__name__)
36
37
38 def get_jinja_env():
39 from datetime import datetime
40 from ..utils import readable_size
41
42 _jinja_env = jinja2.Environment(
43 loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
44 )
45
46 def format_ts(value):
47 if value is None or np.isnan(value):
48 return None
49 return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')
50
51 _jinja_env.filters['format_ts'] = format_ts
52 _jinja_env.filters['readable_size'] = readable_size
53 return _jinja_env
54
55
56 class BokehStaticFileHandler(web.StaticFileHandler):
57 @classmethod
58 def get_absolute_path(cls, root, path):
59 from bokeh import server
60 path_parts = path.rsplit('/', 1)
61 if 'bokeh' in path_parts[-1]:
62 root = os.path.join(os.path.dirname(server.__file__), "static")
63 return super(BokehStaticFileHandler, cls).get_absolute_path(root, path)
64
65 def validate_absolute_path(self, root, absolute_path):
66 from bokeh import server
67 path_parts = absolute_path.rsplit('/', 1)
68 if 'bokeh' in path_parts[-1]:
69 root = os.path.join(os.path.dirname(server.__file__), "static")
70 return super(BokehStaticFileHandler, self).validate_absolute_path(root, absolute_path)
71
72
73 class MarsRequestHandler(web.RequestHandler):
74 def initialize(self, scheduler_ip):
75 self._scheduler = scheduler_ip
76 self.web_api = MarsWebAPI(scheduler_ip)
77
78
79 class MarsWebAPI(MarsAPI):
80 def __init__(self, scheduler_ip):
81 super(MarsWebAPI, self).__init__(scheduler_ip)
82
83 def get_tasks_info(self, select_session_id=None):
84 from ..scheduler import GraphState
85
86 sessions = defaultdict(dict)
87 for session_id, session_ref in six.iteritems(self.session_manager.get_sessions()):
88 if select_session_id and session_id != select_session_id:
89 continue
90 session_desc = sessions[session_id]
91 session_desc['id'] = session_id
92 session_desc['name'] = session_id
93 session_desc['tasks'] = dict()
94 session_ref = self.actor_client.actor_ref(session_ref)
95 for graph_key, graph_meta_ref in six.iteritems(session_ref.get_graph_meta_refs()):
96 task_desc = dict()
97
98 state = self.get_graph_state(session_id, graph_key)
99 if state == GraphState.PREPARING:
100 task_desc['state'] = state.name.lower()
101 session_desc['tasks'][graph_key] = task_desc
102 continue
103
104 graph_meta_ref = self.actor_client.actor_ref(graph_meta_ref)
105 task_desc['id'] = graph_key
106 task_desc['state'] = graph_meta_ref.get_state().value
107 start_time, end_time, graph_size = graph_meta_ref.get_graph_info()
108 task_desc['start_time'] = start_time
109 task_desc['end_time'] = end_time
110 task_desc['graph_size'] = graph_size
111
112 session_desc['tasks'][graph_key] = task_desc
113 return sessions
114
115 def get_task_detail(self, session_id, task_id):
116 graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)
117 return graph_meta_ref.calc_stats()
118
119 def get_operand_info(self, session_id, task_id, state=None):
120 graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)
121 return graph_meta_ref.get_operand_info(state=state)
122
123 def get_workers_meta(self):
124 resource_uid = ResourceActor.default_uid()
125 resource_ref = self.get_actor_ref(resource_uid)
126 return resource_ref.get_workers_meta()
127
128 def query_worker_events(self, endpoint, category, time_start=None, time_end=None):
129 from ..worker import EventsActor
130 ref = self.actor_client.actor_ref(EventsActor.default_uid(), address=endpoint)
131 return ref.query_by_time(category, time_start=time_start, time_end=time_end)
132
133 def write_mutable_tensor(self, session_id, name, payload_type, body):
134 from ..serialize import dataserializer
135 from ..tensor.core import Indexes
136 session_uid = SessionActor.gen_uid(session_id)
137 session_ref = self.get_actor_ref(session_uid)
138
139 index_json_size = np.asscalar(np.frombuffer(body[0:8], dtype=np.int64))
140 index_json = json.loads(body[8:8+index_json_size].decode('ascii'))
141 index = Indexes.from_json(index_json).indexes
142 if payload_type is None:
143 value = dataserializer.loads(body[8+index_json_size:], raw=False)
144 elif payload_type == 'tensor':
145 tensor_chunk_offset = 8 + index_json_size
146 with pyarrow.BufferReader(body[tensor_chunk_offset:]) as reader:
147 value = pyarrow.read_tensor(reader).to_numpy()
148 elif payload_type == 'record_batch':
149 schema_size = np.asscalar(np.frombuffer(body[8+index_json_size:8+index_json_size+8], dtype=np.int64))
150 schema_offset = 8 + index_json_size + 8
151 with pyarrow.BufferReader(body[schema_offset:schema_offset+schema_size]) as reader:
152 schema = pyarrow.read_schema(reader)
153 record_batch_offset = schema_offset + schema_size
154 with pyarrow.BufferReader(body[record_batch_offset:]) as reader:
155 record_batch = pyarrow.read_record_batch(reader, schema)
156 value = record_batch.to_pandas().to_records(index=False)
157 else:
158 raise ValueError('Not supported payload type: %s' % payload_type)
159 return session_ref.write_mutable_tensor(name, index, value)
160
161
162 class MarsWeb(object):
163 def __init__(self, port=None, scheduler_ip=None):
164 self._port = port
165 self._scheduler_ip = scheduler_ip
166 self._server = None
167 self._server_thread = None
168
169 @property
170 def port(self):
171 return self._port
172
173 @staticmethod
174 def _configure_loop():
175 try:
176 ioloop.IOLoop.current()
177 except RuntimeError:
178 if six.PY3:
179 import asyncio
180 asyncio.set_event_loop(asyncio.new_event_loop())
181 loop = None
182 try:
183 loop = ioloop.IOLoop.current()
184 except: # noqa: E722
185 pass
186 if loop is None:
187 raise
188 else:
189 raise
190
191 def _try_start_web_server(self):
192 static_path = os.path.join(os.path.dirname(__file__), 'static')
193
194 handlers = dict()
195 for p, h in _bokeh_apps.items():
196 handlers[p] = Application(FunctionHandler(functools.partial(h, self._scheduler_ip)))
197
198 handler_kwargs = {'scheduler_ip': self._scheduler_ip}
199 extra_patterns = [
200 ('/static/(.*)', BokehStaticFileHandler, {'path': static_path})
201 ]
202 for p, h in _web_handlers.items():
203 extra_patterns.append((p, h, handler_kwargs))
204
205 retrial = 5
206 while retrial:
207 try:
208 if self._port is None:
209 use_port = get_next_port()
210 else:
211 use_port = self._port
212
213 self._server = Server(
214 handlers, allow_websocket_origin=['*'],
215 address='0.0.0.0', port=use_port,
216 extra_patterns=extra_patterns,
217 )
218 self._server.start()
219 self._port = use_port
220 logger.info('Mars UI started at 0.0.0.0:%d', self._port)
221 break
222 except OSError:
223 if self._port is not None:
224 raise
225 retrial -= 1
226 if retrial == 0:
227 raise
228
229 def start(self, event=None, block=False):
230 self._configure_loop()
231 self._try_start_web_server()
232
233 if not block:
234 self._server_thread = threading.Thread(target=self._server.io_loop.start)
235 self._server_thread.daemon = True
236 self._server_thread.start()
237
238 if event:
239 event.set()
240 else:
241 if event:
242 event.set()
243
244 self._server.io_loop.start()
245
246 def stop(self):
247 if self._server is not None:
248 self._server.io_loop.stop()
249 self._server.stop()
250
251
252 _bokeh_apps = dict()
253 _web_handlers = dict()
254
255
256 def register_bokeh_app(pattern, handler):
257 _bokeh_apps[pattern] = handler
258
259
260 def register_web_handler(pattern, handler):
261 _web_handlers[pattern] = handler
262
[end of mars/web/server.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/mars/web/server.py b/mars/web/server.py
--- a/mars/web/server.py
+++ b/mars/web/server.py
@@ -214,6 +214,7 @@
handlers, allow_websocket_origin=['*'],
address='0.0.0.0', port=use_port,
extra_patterns=extra_patterns,
+ http_server_kwargs={'max_buffer_size': 2 ** 32},
)
self._server.start()
self._port = use_port
| {"golden_diff": "diff --git a/mars/web/server.py b/mars/web/server.py\n--- a/mars/web/server.py\n+++ b/mars/web/server.py\n@@ -214,6 +214,7 @@\n handlers, allow_websocket_origin=['*'],\n address='0.0.0.0', port=use_port,\n extra_patterns=extra_patterns,\n+ http_server_kwargs={'max_buffer_size': 2 ** 32},\n )\n self._server.start()\n self._port = use_port\n", "issue": "[BUG] Web session doesn't work with large data source\n**Describe the bug**\r\n\r\nThe default `max_buffer_size` of tornado HTTP server is 100M, when constructing dataframes from large pandas dataframes it will raise `Content-Length too long`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport functools\nimport json\nimport logging\nimport threading\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport pyarrow\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import FunctionHandler\nfrom bokeh.server.server import Server\nimport jinja2\nfrom tornado import web, ioloop\n\nfrom ..compat import six\nfrom ..utils import get_next_port\nfrom ..scheduler import ResourceActor, SessionActor\nfrom ..api import MarsAPI\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_jinja_env():\n from datetime import datetime\n from ..utils import readable_size\n\n _jinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),\n )\n\n def format_ts(value):\n if value is None or np.isnan(value):\n return None\n return datetime.fromtimestamp(value).strftime('%Y-%m-%d %H:%M:%S')\n\n _jinja_env.filters['format_ts'] = format_ts\n _jinja_env.filters['readable_size'] = readable_size\n return _jinja_env\n\n\nclass BokehStaticFileHandler(web.StaticFileHandler):\n @classmethod\n def get_absolute_path(cls, root, path):\n from bokeh import server\n path_parts = path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, cls).get_absolute_path(root, path)\n\n def validate_absolute_path(self, root, absolute_path):\n from bokeh import server\n path_parts = absolute_path.rsplit('/', 1)\n if 'bokeh' in path_parts[-1]:\n root = os.path.join(os.path.dirname(server.__file__), \"static\")\n return super(BokehStaticFileHandler, self).validate_absolute_path(root, absolute_path)\n\n\nclass MarsRequestHandler(web.RequestHandler):\n def initialize(self, scheduler_ip):\n self._scheduler = scheduler_ip\n self.web_api = MarsWebAPI(scheduler_ip)\n\n\nclass MarsWebAPI(MarsAPI):\n def __init__(self, scheduler_ip):\n super(MarsWebAPI, self).__init__(scheduler_ip)\n\n def get_tasks_info(self, select_session_id=None):\n from ..scheduler import GraphState\n\n sessions = defaultdict(dict)\n for session_id, session_ref in six.iteritems(self.session_manager.get_sessions()):\n if select_session_id and session_id != select_session_id:\n continue\n session_desc = sessions[session_id]\n 
session_desc['id'] = session_id\n session_desc['name'] = session_id\n session_desc['tasks'] = dict()\n session_ref = self.actor_client.actor_ref(session_ref)\n for graph_key, graph_meta_ref in six.iteritems(session_ref.get_graph_meta_refs()):\n task_desc = dict()\n\n state = self.get_graph_state(session_id, graph_key)\n if state == GraphState.PREPARING:\n task_desc['state'] = state.name.lower()\n session_desc['tasks'][graph_key] = task_desc\n continue\n\n graph_meta_ref = self.actor_client.actor_ref(graph_meta_ref)\n task_desc['id'] = graph_key\n task_desc['state'] = graph_meta_ref.get_state().value\n start_time, end_time, graph_size = graph_meta_ref.get_graph_info()\n task_desc['start_time'] = start_time\n task_desc['end_time'] = end_time\n task_desc['graph_size'] = graph_size\n\n session_desc['tasks'][graph_key] = task_desc\n return sessions\n\n def get_task_detail(self, session_id, task_id):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.calc_stats()\n\n def get_operand_info(self, session_id, task_id, state=None):\n graph_meta_ref = self.get_graph_meta_ref(session_id, task_id)\n return graph_meta_ref.get_operand_info(state=state)\n\n def get_workers_meta(self):\n resource_uid = ResourceActor.default_uid()\n resource_ref = self.get_actor_ref(resource_uid)\n return resource_ref.get_workers_meta()\n\n def query_worker_events(self, endpoint, category, time_start=None, time_end=None):\n from ..worker import EventsActor\n ref = self.actor_client.actor_ref(EventsActor.default_uid(), address=endpoint)\n return ref.query_by_time(category, time_start=time_start, time_end=time_end)\n\n def write_mutable_tensor(self, session_id, name, payload_type, body):\n from ..serialize import dataserializer\n from ..tensor.core import Indexes\n session_uid = SessionActor.gen_uid(session_id)\n session_ref = self.get_actor_ref(session_uid)\n\n index_json_size = np.asscalar(np.frombuffer(body[0:8], dtype=np.int64))\n index_json = json.loads(body[8:8+index_json_size].decode('ascii'))\n index = Indexes.from_json(index_json).indexes\n if payload_type is None:\n value = dataserializer.loads(body[8+index_json_size:], raw=False)\n elif payload_type == 'tensor':\n tensor_chunk_offset = 8 + index_json_size\n with pyarrow.BufferReader(body[tensor_chunk_offset:]) as reader:\n value = pyarrow.read_tensor(reader).to_numpy()\n elif payload_type == 'record_batch':\n schema_size = np.asscalar(np.frombuffer(body[8+index_json_size:8+index_json_size+8], dtype=np.int64))\n schema_offset = 8 + index_json_size + 8\n with pyarrow.BufferReader(body[schema_offset:schema_offset+schema_size]) as reader:\n schema = pyarrow.read_schema(reader)\n record_batch_offset = schema_offset + schema_size\n with pyarrow.BufferReader(body[record_batch_offset:]) as reader:\n record_batch = pyarrow.read_record_batch(reader, schema)\n value = record_batch.to_pandas().to_records(index=False)\n else:\n raise ValueError('Not supported payload type: %s' % payload_type)\n return session_ref.write_mutable_tensor(name, index, value)\n\n\nclass MarsWeb(object):\n def __init__(self, port=None, scheduler_ip=None):\n self._port = port\n self._scheduler_ip = scheduler_ip\n self._server = None\n self._server_thread = None\n\n @property\n def port(self):\n return self._port\n\n @staticmethod\n def _configure_loop():\n try:\n ioloop.IOLoop.current()\n except RuntimeError:\n if six.PY3:\n import asyncio\n asyncio.set_event_loop(asyncio.new_event_loop())\n loop = None\n try:\n loop = ioloop.IOLoop.current()\n except: # noqa: E722\n 
pass\n if loop is None:\n raise\n else:\n raise\n\n def _try_start_web_server(self):\n static_path = os.path.join(os.path.dirname(__file__), 'static')\n\n handlers = dict()\n for p, h in _bokeh_apps.items():\n handlers[p] = Application(FunctionHandler(functools.partial(h, self._scheduler_ip)))\n\n handler_kwargs = {'scheduler_ip': self._scheduler_ip}\n extra_patterns = [\n ('/static/(.*)', BokehStaticFileHandler, {'path': static_path})\n ]\n for p, h in _web_handlers.items():\n extra_patterns.append((p, h, handler_kwargs))\n\n retrial = 5\n while retrial:\n try:\n if self._port is None:\n use_port = get_next_port()\n else:\n use_port = self._port\n\n self._server = Server(\n handlers, allow_websocket_origin=['*'],\n address='0.0.0.0', port=use_port,\n extra_patterns=extra_patterns,\n )\n self._server.start()\n self._port = use_port\n logger.info('Mars UI started at 0.0.0.0:%d', self._port)\n break\n except OSError:\n if self._port is not None:\n raise\n retrial -= 1\n if retrial == 0:\n raise\n\n def start(self, event=None, block=False):\n self._configure_loop()\n self._try_start_web_server()\n\n if not block:\n self._server_thread = threading.Thread(target=self._server.io_loop.start)\n self._server_thread.daemon = True\n self._server_thread.start()\n\n if event:\n event.set()\n else:\n if event:\n event.set()\n\n self._server.io_loop.start()\n\n def stop(self):\n if self._server is not None:\n self._server.io_loop.stop()\n self._server.stop()\n\n\n_bokeh_apps = dict()\n_web_handlers = dict()\n\n\ndef register_bokeh_app(pattern, handler):\n _bokeh_apps[pattern] = handler\n\n\ndef register_web_handler(pattern, handler):\n _web_handlers[pattern] = handler\n", "path": "mars/web/server.py"}]} | 3,361 | 112 |
gh_patches_debug_22341 | rasdani/github-patches | git_diff | pantsbuild__pants-5068 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
The 1.3.X branch does not sync native_engine.so binaries to s3
As a result, 1.3.X installs fail to fetch a native engine binary.
</issue>
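One possible stopgap while the s3 bucket is missing the binaries is to point the fetcher at the bintray mirror named in the fix below. The following is only an illustrative sketch based on the `BinaryUtil` constructor and `select_binary` signature shown in the code that follows; the support directory, version string and bootstrap directory are placeholders rather than confirmed values.

```python
# Hypothetical workaround sketch; supportdir, version and cache dir are placeholders.
from pants.binaries.binary_util import BinaryUtil

util = BinaryUtil(
    baseurls=[
        'https://dl.bintray.com/pantsbuild/bin/build-support',  # mirror named in the fix below
        'https://s3.amazonaws.com/binaries.pantsbuild.org',     # current default, missing the binary
    ],
    timeout_secs=30,
    bootstrapdir='/tmp/pants-bootstrap',
)

# Downloads the binary (if any base url has it) into the bootstrap dir and returns its local path.
engine_path = util.select_binary('bin/native-engine', '<engine-version>', 'native_engine.so')
```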
<code>
[start of src/python/pants/binaries/binary_util.py]
1 # coding=utf-8
2 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
3 # Licensed under the Apache License, Version 2.0 (see LICENSE).
4
5 from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
6 unicode_literals, with_statement)
7
8 import logging
9 import os
10 import posixpath
11 from contextlib import contextmanager
12
13 from twitter.common.collections import OrderedSet
14
15 from pants.base.build_environment import get_buildroot
16 from pants.base.exceptions import TaskError
17 from pants.net.http.fetcher import Fetcher
18 from pants.subsystem.subsystem import Subsystem
19 from pants.util.contextutil import temporary_file
20 from pants.util.dirutil import chmod_plus_x, safe_delete, safe_open
21 from pants.util.osutil import get_os_id
22
23
24 _DEFAULT_PATH_BY_ID = {
25 ('linux', 'x86_64'): ('linux', 'x86_64'),
26 ('linux', 'amd64'): ('linux', 'x86_64'),
27 ('linux', 'i386'): ('linux', 'i386'),
28 ('linux', 'i686'): ('linux', 'i386'),
29 ('darwin', '9'): ('mac', '10.5'),
30 ('darwin', '10'): ('mac', '10.6'),
31 ('darwin', '11'): ('mac', '10.7'),
32 ('darwin', '12'): ('mac', '10.8'),
33 ('darwin', '13'): ('mac', '10.9'),
34 ('darwin', '14'): ('mac', '10.10'),
35 ('darwin', '15'): ('mac', '10.11'),
36 ('darwin', '16'): ('mac', '10.12'),
37 ('darwin', '17'): ('mac', '10.13'),
38 }
39
40
41 logger = logging.getLogger(__name__)
42
43
44 class BinaryUtil(object):
45 """Wraps utility methods for finding binary executables.
46
47 :API: public
48 """
49
50 class Factory(Subsystem):
51 """
52 :API: public
53 """
54 options_scope = 'binaries'
55
56 @classmethod
57 def register_options(cls, register):
58 register('--baseurls', type=list, advanced=True,
59 default=['https://s3.amazonaws.com/binaries.pantsbuild.org'],
60 help='List of urls from which binary tools are downloaded. Urls are searched in '
61 'order until the requested path is found.')
62 register('--fetch-timeout-secs', type=int, default=30, advanced=True,
63 help='Timeout in seconds for url reads when fetching binary tools from the '
64 'repos specified by --baseurls')
65 register('--path-by-id', type=dict, advanced=True,
66 help='Maps output of uname for a machine to a binary search path. e.g. '
67 '{ ("darwin", "15"): ["mac", "10.11"]), ("linux", "arm32"): ["linux", "arm32"] }')
68
69 @classmethod
70 def create(cls):
71 """
72 :API: public
73 """
74 # NB: create is a class method to ~force binary fetch location to be global.
75 options = cls.global_instance().get_options()
76 return BinaryUtil(options.baseurls, options.fetch_timeout_secs, options.pants_bootstrapdir,
77 options.path_by_id)
78
79 class MissingMachineInfo(TaskError):
80 """Indicates that pants was unable to map this machine's OS to a binary path prefix."""
81 pass
82
83 class BinaryNotFound(TaskError):
84
85 def __init__(self, binary, accumulated_errors):
86 super(BinaryUtil.BinaryNotFound, self).__init__(
87 'Failed to fetch binary {binary} from any source: ({sources})'
88 .format(binary=binary, sources=', '.join(accumulated_errors)))
89
90 class NoBaseUrlsError(TaskError):
91 """Indicates that no urls were specified in pants.ini."""
92 pass
93
94 def _select_binary_base_path(self, supportdir, version, name, uname_func=None):
95 """Calculate the base path.
96
97 Exposed for associated unit tests.
98 :param supportdir: the path used to make a path under --pants_bootstrapdir.
99 :param version: the version number of the tool used to make a path under --pants-bootstrapdir.
100 :param name: name of the binary to search for. (e.g 'protoc')
101 :param uname_func: method to use to emulate os.uname() in testing
102 :returns: Base path used to select the binary file.
103 """
104 uname_func = uname_func or os.uname
105 os_id = get_os_id(uname_func=uname_func)
106 if not os_id:
107 raise self.MissingMachineInfo('Pants has no binaries for {}'.format(' '.join(uname_func())))
108
109 try:
110 middle_path = self._path_by_id[os_id]
111 except KeyError:
112 raise self.MissingMachineInfo('Update --binaries-path-by-id to find binaries for {!r}'
113 .format(os_id))
114 return os.path.join(supportdir, *(middle_path + (version, name)))
115
116 def __init__(self, baseurls, timeout_secs, bootstrapdir, path_by_id=None):
117 """Creates a BinaryUtil with the given settings to define binary lookup behavior.
118
119 This constructor is primarily used for testing. Production code will usually initialize
120 an instance using the BinaryUtil.Factory.create() method.
121
122 :param baseurls: URL prefixes which represent repositories of binaries.
123 :type baseurls: list of string
124 :param int timeout_secs: Timeout in seconds for url reads.
125 :param string bootstrapdir: Directory to use for caching binaries. Uses this directory to
126 search for binaries in, or download binaries to if needed.
127 :param dict path_by_id: Additional mapping from (sysname, id) -> (os, arch) for tool
128 directory naming
129 """
130 self._baseurls = baseurls
131 self._timeout_secs = timeout_secs
132 self._pants_bootstrapdir = bootstrapdir
133 self._path_by_id = _DEFAULT_PATH_BY_ID.copy()
134 if path_by_id:
135 self._path_by_id.update((tuple(k), tuple(v)) for k, v in path_by_id.items())
136
137 @contextmanager
138 def _select_binary_stream(self, name, binary_path, fetcher=None):
139 """Select a binary matching the current os and architecture.
140
141 :param string binary_path: The path to the binary to fetch.
142 :param fetcher: Optional argument used only for testing, to 'pretend' to open urls.
143 :returns: a 'stream' to download it from a support directory. The returned 'stream' is actually
144 a lambda function which returns the files binary contents.
145 :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
146 and name could be found for the current platform.
147 """
148
149 if not self._baseurls:
150 raise self.NoBaseUrlsError(
151 'No urls are defined for the --pants-support-baseurls option.')
152 downloaded_successfully = False
153 accumulated_errors = []
154 for baseurl in OrderedSet(self._baseurls): # De-dup URLS: we only want to try each URL once.
155 url = posixpath.join(baseurl, binary_path)
156 logger.info('Attempting to fetch {name} binary from: {url} ...'.format(name=name, url=url))
157 try:
158 with temporary_file() as dest:
159 fetcher = fetcher or Fetcher(get_buildroot())
160 fetcher.download(url,
161 listener=Fetcher.ProgressListener(),
162 path_or_fd=dest,
163 timeout_secs=self._timeout_secs)
164 logger.info('Fetched {name} binary from: {url} .'.format(name=name, url=url))
165 downloaded_successfully = True
166 dest.seek(0)
167 yield lambda: dest.read()
168 break
169 except (IOError, Fetcher.Error, ValueError) as e:
170 accumulated_errors.append('Failed to fetch binary from {url}: {error}'
171 .format(url=url, error=e))
172 if not downloaded_successfully:
173 raise self.BinaryNotFound(binary_path, accumulated_errors)
174
175 def select_binary(self, supportdir, version, name):
176 """Selects a binary matching the current os and architecture.
177
178 :param string supportdir: The path the `name` binaries are stored under.
179 :param string version: The version number of the binary to select.
180 :param string name: The name of the binary to fetch.
181 :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version
182 and name could be found for the current platform.
183 """
184 # TODO(John Sirois): finish doc of the path structure expected under base_path.
185 binary_path = self._select_binary_base_path(supportdir, version, name)
186 return self._fetch_binary(name=name, binary_path=binary_path)
187
188 def select_script(self, supportdir, version, name):
189 """Selects a platform-independent script.
190
191 :param string supportdir: The path the `name` scripts are stored under.
192 :param string version: The version number of the script to select.
193 :param string name: The name of the script to fetch.
194 :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no script of the given version
195 and name could be found.
196 """
197 binary_path = os.path.join(supportdir, version, name)
198 return self._fetch_binary(name=name, binary_path=binary_path)
199
200 def _fetch_binary(self, name, binary_path):
201 bootstrap_dir = os.path.realpath(os.path.expanduser(self._pants_bootstrapdir))
202 bootstrapped_binary_path = os.path.join(bootstrap_dir, binary_path)
203 if not os.path.exists(bootstrapped_binary_path):
204 downloadpath = bootstrapped_binary_path + '~'
205 try:
206 with self._select_binary_stream(name, binary_path) as stream:
207 with safe_open(downloadpath, 'wb') as bootstrapped_binary:
208 bootstrapped_binary.write(stream())
209 os.rename(downloadpath, bootstrapped_binary_path)
210 chmod_plus_x(bootstrapped_binary_path)
211 finally:
212 safe_delete(downloadpath)
213
214 logger.debug('Selected {binary} binary bootstrapped to: {path}'
215 .format(binary=name, path=bootstrapped_binary_path))
216 return bootstrapped_binary_path
217
[end of src/python/pants/binaries/binary_util.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/python/pants/binaries/binary_util.py b/src/python/pants/binaries/binary_util.py
--- a/src/python/pants/binaries/binary_util.py
+++ b/src/python/pants/binaries/binary_util.py
@@ -56,7 +56,14 @@
@classmethod
def register_options(cls, register):
register('--baseurls', type=list, advanced=True,
- default=['https://s3.amazonaws.com/binaries.pantsbuild.org'],
+ default=['https://binaries.pantsbuild.org',
+ # NB: The 1.3.x branch uses a native engine packaging scheme and release
+ # process that differs widely from the mainline. As a result, we include
+ # the legacy bintray url (where the native engine is deployed to on this
+ # branch) to allow for deployment and fetching of the native engine without
+ # major surgery on the deployment and release processes.
+ # See: https://github.com/pantsbuild/pants/issues/5061
+ 'https://dl.bintray.com/pantsbuild/bin/build-support'],
help='List of urls from which binary tools are downloaded. Urls are searched in '
'order until the requested path is found.')
register('--fetch-timeout-secs', type=int, default=30, advanced=True,
| {"golden_diff": "diff --git a/src/python/pants/binaries/binary_util.py b/src/python/pants/binaries/binary_util.py\n--- a/src/python/pants/binaries/binary_util.py\n+++ b/src/python/pants/binaries/binary_util.py\n@@ -56,7 +56,14 @@\n @classmethod\n def register_options(cls, register):\n register('--baseurls', type=list, advanced=True,\n- default=['https://s3.amazonaws.com/binaries.pantsbuild.org'],\n+ default=['https://binaries.pantsbuild.org',\n+ # NB: The 1.3.x branch uses a native engine packaging scheme and release\n+ # process that differs widely from the mainline. As a result, we include\n+ # the legacy bintray url (where the native engine is deployed to on this\n+ # branch) to allow for deployment and fetching of the native engine without\n+ # major surgery on the deployment and release processes.\n+ # See: https://github.com/pantsbuild/pants/issues/5061\n+ 'https://dl.bintray.com/pantsbuild/bin/build-support'],\n help='List of urls from which binary tools are downloaded. Urls are searched in '\n 'order until the requested path is found.')\n register('--fetch-timeout-secs', type=int, default=30, advanced=True,\n", "issue": "The 1.3.X branch does not sync native_engine.so binaries to s3\nAs a result, 1.3.X installs fail to fetch a native engine binary.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport logging\nimport os\nimport posixpath\nfrom contextlib import contextmanager\n\nfrom twitter.common.collections import OrderedSet\n\nfrom pants.base.build_environment import get_buildroot\nfrom pants.base.exceptions import TaskError\nfrom pants.net.http.fetcher import Fetcher\nfrom pants.subsystem.subsystem import Subsystem\nfrom pants.util.contextutil import temporary_file\nfrom pants.util.dirutil import chmod_plus_x, safe_delete, safe_open\nfrom pants.util.osutil import get_os_id\n\n\n_DEFAULT_PATH_BY_ID = {\n ('linux', 'x86_64'): ('linux', 'x86_64'),\n ('linux', 'amd64'): ('linux', 'x86_64'),\n ('linux', 'i386'): ('linux', 'i386'),\n ('linux', 'i686'): ('linux', 'i386'),\n ('darwin', '9'): ('mac', '10.5'),\n ('darwin', '10'): ('mac', '10.6'),\n ('darwin', '11'): ('mac', '10.7'),\n ('darwin', '12'): ('mac', '10.8'),\n ('darwin', '13'): ('mac', '10.9'),\n ('darwin', '14'): ('mac', '10.10'),\n ('darwin', '15'): ('mac', '10.11'),\n ('darwin', '16'): ('mac', '10.12'),\n ('darwin', '17'): ('mac', '10.13'),\n}\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass BinaryUtil(object):\n \"\"\"Wraps utility methods for finding binary executables.\n\n :API: public\n \"\"\"\n\n class Factory(Subsystem):\n \"\"\"\n :API: public\n \"\"\"\n options_scope = 'binaries'\n\n @classmethod\n def register_options(cls, register):\n register('--baseurls', type=list, advanced=True,\n default=['https://s3.amazonaws.com/binaries.pantsbuild.org'],\n help='List of urls from which binary tools are downloaded. Urls are searched in '\n 'order until the requested path is found.')\n register('--fetch-timeout-secs', type=int, default=30, advanced=True,\n help='Timeout in seconds for url reads when fetching binary tools from the '\n 'repos specified by --baseurls')\n register('--path-by-id', type=dict, advanced=True,\n help='Maps output of uname for a machine to a binary search path. e.g. 
'\n '{ (\"darwin\", \"15\"): [\"mac\", \"10.11\"]), (\"linux\", \"arm32\"): [\"linux\", \"arm32\"] }')\n\n @classmethod\n def create(cls):\n \"\"\"\n :API: public\n \"\"\"\n # NB: create is a class method to ~force binary fetch location to be global.\n options = cls.global_instance().get_options()\n return BinaryUtil(options.baseurls, options.fetch_timeout_secs, options.pants_bootstrapdir,\n options.path_by_id)\n\n class MissingMachineInfo(TaskError):\n \"\"\"Indicates that pants was unable to map this machine's OS to a binary path prefix.\"\"\"\n pass\n\n class BinaryNotFound(TaskError):\n\n def __init__(self, binary, accumulated_errors):\n super(BinaryUtil.BinaryNotFound, self).__init__(\n 'Failed to fetch binary {binary} from any source: ({sources})'\n .format(binary=binary, sources=', '.join(accumulated_errors)))\n\n class NoBaseUrlsError(TaskError):\n \"\"\"Indicates that no urls were specified in pants.ini.\"\"\"\n pass\n\n def _select_binary_base_path(self, supportdir, version, name, uname_func=None):\n \"\"\"Calculate the base path.\n\n Exposed for associated unit tests.\n :param supportdir: the path used to make a path under --pants_bootstrapdir.\n :param version: the version number of the tool used to make a path under --pants-bootstrapdir.\n :param name: name of the binary to search for. (e.g 'protoc')\n :param uname_func: method to use to emulate os.uname() in testing\n :returns: Base path used to select the binary file.\n \"\"\"\n uname_func = uname_func or os.uname\n os_id = get_os_id(uname_func=uname_func)\n if not os_id:\n raise self.MissingMachineInfo('Pants has no binaries for {}'.format(' '.join(uname_func())))\n\n try:\n middle_path = self._path_by_id[os_id]\n except KeyError:\n raise self.MissingMachineInfo('Update --binaries-path-by-id to find binaries for {!r}'\n .format(os_id))\n return os.path.join(supportdir, *(middle_path + (version, name)))\n\n def __init__(self, baseurls, timeout_secs, bootstrapdir, path_by_id=None):\n \"\"\"Creates a BinaryUtil with the given settings to define binary lookup behavior.\n\n This constructor is primarily used for testing. Production code will usually initialize\n an instance using the BinaryUtil.Factory.create() method.\n\n :param baseurls: URL prefixes which represent repositories of binaries.\n :type baseurls: list of string\n :param int timeout_secs: Timeout in seconds for url reads.\n :param string bootstrapdir: Directory to use for caching binaries. Uses this directory to\n search for binaries in, or download binaries to if needed.\n :param dict path_by_id: Additional mapping from (sysname, id) -> (os, arch) for tool\n directory naming\n \"\"\"\n self._baseurls = baseurls\n self._timeout_secs = timeout_secs\n self._pants_bootstrapdir = bootstrapdir\n self._path_by_id = _DEFAULT_PATH_BY_ID.copy()\n if path_by_id:\n self._path_by_id.update((tuple(k), tuple(v)) for k, v in path_by_id.items())\n\n @contextmanager\n def _select_binary_stream(self, name, binary_path, fetcher=None):\n \"\"\"Select a binary matching the current os and architecture.\n\n :param string binary_path: The path to the binary to fetch.\n :param fetcher: Optional argument used only for testing, to 'pretend' to open urls.\n :returns: a 'stream' to download it from a support directory. 
The returned 'stream' is actually\n a lambda function which returns the files binary contents.\n :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version\n and name could be found for the current platform.\n \"\"\"\n\n if not self._baseurls:\n raise self.NoBaseUrlsError(\n 'No urls are defined for the --pants-support-baseurls option.')\n downloaded_successfully = False\n accumulated_errors = []\n for baseurl in OrderedSet(self._baseurls): # De-dup URLS: we only want to try each URL once.\n url = posixpath.join(baseurl, binary_path)\n logger.info('Attempting to fetch {name} binary from: {url} ...'.format(name=name, url=url))\n try:\n with temporary_file() as dest:\n fetcher = fetcher or Fetcher(get_buildroot())\n fetcher.download(url,\n listener=Fetcher.ProgressListener(),\n path_or_fd=dest,\n timeout_secs=self._timeout_secs)\n logger.info('Fetched {name} binary from: {url} .'.format(name=name, url=url))\n downloaded_successfully = True\n dest.seek(0)\n yield lambda: dest.read()\n break\n except (IOError, Fetcher.Error, ValueError) as e:\n accumulated_errors.append('Failed to fetch binary from {url}: {error}'\n .format(url=url, error=e))\n if not downloaded_successfully:\n raise self.BinaryNotFound(binary_path, accumulated_errors)\n\n def select_binary(self, supportdir, version, name):\n \"\"\"Selects a binary matching the current os and architecture.\n\n :param string supportdir: The path the `name` binaries are stored under.\n :param string version: The version number of the binary to select.\n :param string name: The name of the binary to fetch.\n :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no binary of the given version\n and name could be found for the current platform.\n \"\"\"\n # TODO(John Sirois): finish doc of the path structure expected under base_path.\n binary_path = self._select_binary_base_path(supportdir, version, name)\n return self._fetch_binary(name=name, binary_path=binary_path)\n\n def select_script(self, supportdir, version, name):\n \"\"\"Selects a platform-independent script.\n\n :param string supportdir: The path the `name` scripts are stored under.\n :param string version: The version number of the script to select.\n :param string name: The name of the script to fetch.\n :raises: :class:`pants.binary_util.BinaryUtil.BinaryNotFound` if no script of the given version\n and name could be found.\n \"\"\"\n binary_path = os.path.join(supportdir, version, name)\n return self._fetch_binary(name=name, binary_path=binary_path)\n\n def _fetch_binary(self, name, binary_path):\n bootstrap_dir = os.path.realpath(os.path.expanduser(self._pants_bootstrapdir))\n bootstrapped_binary_path = os.path.join(bootstrap_dir, binary_path)\n if not os.path.exists(bootstrapped_binary_path):\n downloadpath = bootstrapped_binary_path + '~'\n try:\n with self._select_binary_stream(name, binary_path) as stream:\n with safe_open(downloadpath, 'wb') as bootstrapped_binary:\n bootstrapped_binary.write(stream())\n os.rename(downloadpath, bootstrapped_binary_path)\n chmod_plus_x(bootstrapped_binary_path)\n finally:\n safe_delete(downloadpath)\n\n logger.debug('Selected {binary} binary bootstrapped to: {path}'\n .format(binary=name, path=bootstrapped_binary_path))\n return bootstrapped_binary_path\n", "path": "src/python/pants/binaries/binary_util.py"}]} | 3,372 | 296 |
gh_patches_debug_33905 | rasdani/github-patches | git_diff | aws-cloudformation__cfn-lint-2873 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
(AWS::Lambda::Function.FileSystemConfigs.LocalMountPath) `The local mount path must start with /mnt/ and be a valid absolute path.` not being checked
### Is this feature request related to a new rule or cfn-lint capabilities?
New capability
### Describe the feature you'd like to request
cfn-lint does not flag the invalid `LocalMountPath` value in the following CFN template:
```yaml
LambdaFunction:
Type: AWS::Lambda::Function
Properties:
FileSystemConfigs:
- Arn: <arn>
LocalMountPath: /efs
```
According to the Lambda GUI, the path needs to start with `/mnt/`:
```
Local mount path
Only absolute paths are supported.
The local mount path must start with /mnt/ and be a valid absolute path.
```
### Describe the solution you'd like
cfn-lint should give you a notification if the path doesn't start with `/mnt/`:
`The local mount path must start with /mnt/ and be a valid absolute path.`
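A minimal sketch of what such a check could look like is shown below. The regex, function name and return convention are assumptions made for illustration; they are not the pattern that cfn-lint or the AWS resource specification actually ships.

```python
# Illustrative only: the pattern and message are assumptions, not cfn-lint's real rule.
import re

LOCAL_MOUNT_PATH_PATTERN = re.compile(r"^/mnt/[A-Za-z0-9._-]+$")

def check_local_mount_path(value):
    """Return an error message if the value violates the quoted Lambda console rule."""
    if not LOCAL_MOUNT_PATH_PATTERN.match(value):
        return "The local mount path must start with /mnt/ and be a valid absolute path."
    return None

print(check_local_mount_path("/efs"))      # prints the error message, as requested above
print(check_local_mount_path("/mnt/efs"))  # prints None
```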
### Additional context
cfn-lint v0.79.7
### Is this something that you'd be interested in working on?
- [ ] 👋 I may be able to implement this feature request
### Would this feature include a breaking change?
- [ ] ⚠️ This feature might incur a breaking change
</issue>
<code>
[start of src/cfnlint/rules/resources/properties/AllowedPattern.py]
1 """
2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 SPDX-License-Identifier: MIT-0
4 """
5 import regex as re
6
7 from cfnlint.helpers import RESOURCE_SPECS
8 from cfnlint.rules import CloudFormationLintRule, RuleMatch
9
10
11 class AllowedPattern(CloudFormationLintRule):
12 """Check if properties have a valid value"""
13
14 id = "E3031"
15 shortdesc = "Check if property values adhere to a specific pattern"
16 description = "Check if properties have a valid value in case of a pattern (Regular Expression)"
17 source_url = "https://github.com/awslabs/cfn-python-lint/blob/main/docs/cfn-resource-specification.md#allowedpattern"
18 tags = ["resources", "property", "allowed pattern", "regex"]
19
20 def __init__(self):
21 """Init"""
22 super().__init__()
23 self.config_definition = {
24 "exceptions": {
25 "default": [],
26 "type": "list",
27 "itemtype": "string",
28 }
29 }
30 self.configure()
31
32 def initialize(self, cfn):
33 """Initialize the rule"""
34 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(
35 "ResourceTypes"
36 ):
37 self.resource_property_types.append(resource_type_spec)
38 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(
39 "PropertyTypes"
40 ):
41 self.resource_sub_property_types.append(property_type_spec)
42
43 def check_value(self, value, path, property_name, **kwargs):
44 """Check Value"""
45 matches = []
46
47 # Get the Allowed Pattern Regex
48 value_pattern_regex = kwargs.get("value_specs", {}).get(
49 "AllowedPatternRegex", {}
50 )
51 # Get the "Human Readable" version for the error message. Optional, if not specified,
52 # the RegEx itself is used.
53 value_pattern = kwargs.get("value_specs", {}).get(
54 "AllowedPattern", value_pattern_regex
55 )
56
57 if isinstance(value, (int, float)):
58 value = str(value)
59
60 if isinstance(value, str):
61 if value_pattern_regex:
62 regex = re.compile(value_pattern_regex, re.ASCII)
63
64 # Ignore values with dynamic references. Simple check to prevent false-positives
65 # See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html
66 if "{{resolve:" not in value:
67 if not regex.match(value):
68 for exception in self.config.get("exceptions"):
69 exception_regex = re.compile(exception)
70 if exception_regex.match(value):
71 return matches
72 full_path = "/".join(str(x) for x in path)
73
74 message = "{} contains invalid characters (Pattern: {}) at {}"
75 matches.append(
76 RuleMatch(
77 path,
78 message.format(property_name, value_pattern, full_path),
79 )
80 )
81
82 return matches
83
84 def check(self, cfn, properties, value_specs, property_specs, path):
85 """Check itself"""
86 matches = []
87 for p_value, p_path in properties.items_safe(path[:]):
88 for prop in p_value:
89 if prop in value_specs:
90 value = value_specs.get(prop).get("Value", {})
91 if value:
92 value_type = value.get("ValueType", "")
93 property_type = (
94 property_specs.get("Properties").get(prop).get("Type")
95 )
96 value_specs = (
97 RESOURCE_SPECS.get(cfn.regions[0])
98 .get("ValueTypes")
99 .get(value_type, {})
100 )
101 if value_specs == "CACHED":
102 value_specs = (
103 RESOURCE_SPECS.get("us-east-1")
104 .get("ValueTypes")
105 .get(value_type, {})
106 )
107 matches.extend(
108 cfn.check_value(
109 p_value,
110 prop,
111 p_path,
112 check_value=self.check_value,
113 value_specs=value_specs,
114 cfn=cfn,
115 property_type=property_type,
116 property_name=prop,
117 )
118 )
119 return matches
120
121 def match_resource_sub_properties(self, properties, property_type, path, cfn):
122 """Match for sub properties"""
123 matches = []
124
125 specs = (
126 RESOURCE_SPECS.get(cfn.regions[0])
127 .get("PropertyTypes")
128 .get(property_type, {})
129 .get("Properties", {})
130 )
131 property_specs = (
132 RESOURCE_SPECS.get(cfn.regions[0]).get("PropertyTypes").get(property_type)
133 )
134 matches.extend(self.check(cfn, properties, specs, property_specs, path))
135
136 return matches
137
138 def match_resource_properties(self, properties, resource_type, path, cfn):
139 """Check CloudFormation Properties"""
140 matches = []
141
142 specs = (
143 RESOURCE_SPECS.get(cfn.regions[0])
144 .get("ResourceTypes")
145 .get(resource_type, {})
146 .get("Properties", {})
147 )
148 resource_specs = (
149 RESOURCE_SPECS.get(cfn.regions[0]).get("ResourceTypes").get(resource_type)
150 )
151 matches.extend(self.check(cfn, properties, specs, resource_specs, path))
152
153 return matches
154
[end of src/cfnlint/rules/resources/properties/AllowedPattern.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cfnlint/rules/resources/properties/AllowedPattern.py b/src/cfnlint/rules/resources/properties/AllowedPattern.py
--- a/src/cfnlint/rules/resources/properties/AllowedPattern.py
+++ b/src/cfnlint/rules/resources/properties/AllowedPattern.py
@@ -81,18 +81,16 @@
return matches
- def check(self, cfn, properties, value_specs, property_specs, path):
+ def check(self, cfn, properties, property_specs, path):
"""Check itself"""
matches = []
for p_value, p_path in properties.items_safe(path[:]):
for prop in p_value:
- if prop in value_specs:
- value = value_specs.get(prop).get("Value", {})
+ if prop in property_specs:
+ value = property_specs.get(prop).get("Value", {})
if value:
value_type = value.get("ValueType", "")
- property_type = (
- property_specs.get("Properties").get(prop).get("Type")
- )
+ property_type = property_specs.get(prop).get("Type")
value_specs = (
RESOURCE_SPECS.get(cfn.regions[0])
.get("ValueTypes")
@@ -128,10 +126,7 @@
.get(property_type, {})
.get("Properties", {})
)
- property_specs = (
- RESOURCE_SPECS.get(cfn.regions[0]).get("PropertyTypes").get(property_type)
- )
- matches.extend(self.check(cfn, properties, specs, property_specs, path))
+ matches.extend(self.check(cfn, properties, specs, path))
return matches
@@ -145,9 +140,6 @@
.get(resource_type, {})
.get("Properties", {})
)
- resource_specs = (
- RESOURCE_SPECS.get(cfn.regions[0]).get("ResourceTypes").get(resource_type)
- )
- matches.extend(self.check(cfn, properties, specs, resource_specs, path))
+ matches.extend(self.check(cfn, properties, specs, path))
return matches
| {"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/AllowedPattern.py b/src/cfnlint/rules/resources/properties/AllowedPattern.py\n--- a/src/cfnlint/rules/resources/properties/AllowedPattern.py\n+++ b/src/cfnlint/rules/resources/properties/AllowedPattern.py\n@@ -81,18 +81,16 @@\n \n return matches\n \n- def check(self, cfn, properties, value_specs, property_specs, path):\n+ def check(self, cfn, properties, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n- if prop in value_specs:\n- value = value_specs.get(prop).get(\"Value\", {})\n+ if prop in property_specs:\n+ value = property_specs.get(prop).get(\"Value\", {})\n if value:\n value_type = value.get(\"ValueType\", \"\")\n- property_type = (\n- property_specs.get(\"Properties\").get(prop).get(\"Type\")\n- )\n+ property_type = property_specs.get(prop).get(\"Type\")\n value_specs = (\n RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"ValueTypes\")\n@@ -128,10 +126,7 @@\n .get(property_type, {})\n .get(\"Properties\", {})\n )\n- property_specs = (\n- RESOURCE_SPECS.get(cfn.regions[0]).get(\"PropertyTypes\").get(property_type)\n- )\n- matches.extend(self.check(cfn, properties, specs, property_specs, path))\n+ matches.extend(self.check(cfn, properties, specs, path))\n \n return matches\n \n@@ -145,9 +140,6 @@\n .get(resource_type, {})\n .get(\"Properties\", {})\n )\n- resource_specs = (\n- RESOURCE_SPECS.get(cfn.regions[0]).get(\"ResourceTypes\").get(resource_type)\n- )\n- matches.extend(self.check(cfn, properties, specs, resource_specs, path))\n+ matches.extend(self.check(cfn, properties, specs, path))\n \n return matches\n", "issue": "(AWS::Lambda::Function.FileSystemConfigs.LocalMountPath) `The local mount path must start with /mnt/ and be a valid absolute path.` not being checked\n### Is this feature request related to a new rule or cfn-lint capabilities?\n\nNew capability\n\n### Describe the feature you'd like to request\n\nThe following CFN template doesn't check the path for `LocalMountPath`:\r\n\r\n```yaml\r\nLambdaFunction:\r\n Type: AWS::Lambda::Function\r\n Properties:\r\n FileSystemConfigs:\r\n - Arn: <arn>\r\n LocalMountPath: /efs\r\n```\r\n\r\nAccording to the Lambda GUI the path needs to start with `/mnt/`:\r\n\r\n```\r\nLocal mount path\r\nOnly absolute paths are supported.\r\nThe local mount path must start with /mnt/ and be a valid absolute path.\r\n```\n\n### Describe the solution you'd like\n\ncfn-lint should give you a notification if the path doesn't start with `/mnt/`:\r\n\r\n`The local mount path must start with /mnt/ and be a valid absolute path.`\n\n### Additional context\n\ncfn-lint v0.79.7\n\n### Is this something that you'd be interested in working on?\n\n- [ ] \ud83d\udc4b I may be able to implement this feature request\n\n### Would this feature include a breaking change?\n\n- [ ] \u26a0\ufe0f This feature might incur a breaking change\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport regex as re\n\nfrom cfnlint.helpers import RESOURCE_SPECS\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass AllowedPattern(CloudFormationLintRule):\n \"\"\"Check if properties have a valid value\"\"\"\n\n id = \"E3031\"\n shortdesc = \"Check if property values adhere to a specific pattern\"\n description = \"Check if properties have a valid value in case of a pattern (Regular Expression)\"\n source_url = \"https://github.com/awslabs/cfn-python-lint/blob/main/docs/cfn-resource-specification.md#allowedpattern\"\n tags = [\"resources\", \"property\", \"allowed pattern\", \"regex\"]\n\n def __init__(self):\n \"\"\"Init\"\"\"\n super().__init__()\n self.config_definition = {\n \"exceptions\": {\n \"default\": [],\n \"type\": \"list\",\n \"itemtype\": \"string\",\n }\n }\n self.configure()\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n \"ResourceTypes\"\n ):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(\n \"PropertyTypes\"\n ):\n self.resource_sub_property_types.append(property_type_spec)\n\n def check_value(self, value, path, property_name, **kwargs):\n \"\"\"Check Value\"\"\"\n matches = []\n\n # Get the Allowed Pattern Regex\n value_pattern_regex = kwargs.get(\"value_specs\", {}).get(\n \"AllowedPatternRegex\", {}\n )\n # Get the \"Human Readable\" version for the error message. Optional, if not specified,\n # the RegEx itself is used.\n value_pattern = kwargs.get(\"value_specs\", {}).get(\n \"AllowedPattern\", value_pattern_regex\n )\n\n if isinstance(value, (int, float)):\n value = str(value)\n\n if isinstance(value, str):\n if value_pattern_regex:\n regex = re.compile(value_pattern_regex, re.ASCII)\n\n # Ignore values with dynamic references. 
Simple check to prevent false-positives\n # See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html\n if \"{{resolve:\" not in value:\n if not regex.match(value):\n for exception in self.config.get(\"exceptions\"):\n exception_regex = re.compile(exception)\n if exception_regex.match(value):\n return matches\n full_path = \"/\".join(str(x) for x in path)\n\n message = \"{} contains invalid characters (Pattern: {}) at {}\"\n matches.append(\n RuleMatch(\n path,\n message.format(property_name, value_pattern, full_path),\n )\n )\n\n return matches\n\n def check(self, cfn, properties, value_specs, property_specs, path):\n \"\"\"Check itself\"\"\"\n matches = []\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n value = value_specs.get(prop).get(\"Value\", {})\n if value:\n value_type = value.get(\"ValueType\", \"\")\n property_type = (\n property_specs.get(\"Properties\").get(prop).get(\"Type\")\n )\n value_specs = (\n RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"ValueTypes\")\n .get(value_type, {})\n )\n if value_specs == \"CACHED\":\n value_specs = (\n RESOURCE_SPECS.get(\"us-east-1\")\n .get(\"ValueTypes\")\n .get(value_type, {})\n )\n matches.extend(\n cfn.check_value(\n p_value,\n prop,\n p_path,\n check_value=self.check_value,\n value_specs=value_specs,\n cfn=cfn,\n property_type=property_type,\n property_name=prop,\n )\n )\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = []\n\n specs = (\n RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"PropertyTypes\")\n .get(property_type, {})\n .get(\"Properties\", {})\n )\n property_specs = (\n RESOURCE_SPECS.get(cfn.regions[0]).get(\"PropertyTypes\").get(property_type)\n )\n matches.extend(self.check(cfn, properties, specs, property_specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n\n specs = (\n RESOURCE_SPECS.get(cfn.regions[0])\n .get(\"ResourceTypes\")\n .get(resource_type, {})\n .get(\"Properties\", {})\n )\n resource_specs = (\n RESOURCE_SPECS.get(cfn.regions[0]).get(\"ResourceTypes\").get(resource_type)\n )\n matches.extend(self.check(cfn, properties, specs, resource_specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/AllowedPattern.py"}]} | 2,279 | 463 |
gh_patches_debug_41144 | rasdani/github-patches | git_diff | streamlink__streamlink-4029 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
artetv: de/fr Livestreams aren't playable anymore
### Checklist
- [X] This is a plugin issue and not a different kind of issue
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Latest build from the master branch
### Description
For about a week now, the live channels haven't been playable. VODs, however, are still working fine.
### Debug log
```text
streamlink https://www.arte.tv/de/live/ worst -l debug
[cli][debug] OS: Linux-5.14.3-arch1-1-x86_64-with-glibc2.33
[cli][debug] Python: 3.9.7
[cli][debug] Streamlink: 2.4.0+17.g24c59a2
[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(0.59.0)
[cli][debug] Arguments:
[cli][debug] url=https://www.arte.tv/de/live/
[cli][debug] stream=['worst']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin artetv for URL https://www.arte.tv/de/live/
error: No playable streams found on this URL: https://www.arte.tv/de/live/
streamlink https://www.arte.tv/fr/direct/ best -l debug
[cli][debug] OS: Linux-5.14.3-arch1-1-x86_64-with-glibc2.33
[cli][debug] Python: 3.9.7
[cli][debug] Streamlink: 2.4.0+17.g24c59a2
[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(0.59.0)
[cli][debug] Arguments:
[cli][debug] url=https://www.arte.tv/fr/direct/
[cli][debug] stream=['best']
[cli][debug] --loglevel=debug
[cli][info] Found matching plugin artetv for URL https://www.arte.tv/fr/direct/
error: No playable streams found on this URL: https://www.arte.tv/fr/direct/
```
plugins.arte: switch to arte.tv v2 API
The Arte.tv V1 API doesn't seem to work anymore for live streams (see #4026).
Both the website and the mobile app use the V2 API, which requires an authentication token. The one from the website is used here for this fix.
</issue>
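For reference, below is a minimal sketch of the v2 call described above. It assumes the bearer token and response layout used by the plugin change later in this report, and `requests` stands in for the plugin's own HTTP session.

```python
# Sketch only: the token and JSON layout are taken from the plugin fix below, not from official docs.
import requests

API_URL = "https://api.arte.tv/api/player/v2/config/{0}/{1}"
API_TOKEN = "MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ"

resp = requests.get(
    API_URL.format("de", "LIVE"),  # "LIVE" selects the livestream; a video id selects a VOD
    headers={"Authorization": f"Bearer {API_TOKEN}"},
)
attributes = resp.json()["data"]["attributes"]
for stream in attributes["streams"]:
    print(stream["slot"], stream["protocol"], stream["url"])
```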
<code>
[start of src/streamlink/plugins/artetv.py]
1 """Plugin for Arte.tv, bi-lingual art and culture channel."""
2
3 import logging
4 import re
5 from operator import itemgetter
6
7 from streamlink.plugin import Plugin, pluginmatcher
8 from streamlink.plugin.api import validate
9 from streamlink.stream import HLSStream
10
11 log = logging.getLogger(__name__)
12 JSON_VOD_URL = "https://api.arte.tv/api/player/v1/config/{0}/{1}?platform=ARTE_NEXT"
13 JSON_LIVE_URL = "https://api.arte.tv/api/player/v1/livestream/{0}"
14
15 _video_schema = validate.Schema({
16 "videoJsonPlayer": {
17 "VSR": validate.any(
18 [],
19 {
20 validate.text: {
21 "height": int,
22 "mediaType": validate.text,
23 "url": validate.text,
24 "versionProg": int,
25 "versionLibelle": validate.text
26 },
27 },
28 )
29 }
30 })
31
32
33 @pluginmatcher(re.compile(r"""
34 https?://(?:\w+\.)?arte\.tv/(?:guide/)?
35 (?P<language>[a-z]{2})/
36 (?:
37 (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+
38 |
39 (?:direct|live)
40 )
41 """, re.VERBOSE))
42 class ArteTV(Plugin):
43 def _create_stream(self, streams):
44 variant, variantname = min([(stream["versionProg"], stream["versionLibelle"]) for stream in streams.values()],
45 key=itemgetter(0))
46 log.debug(f"Using the '{variantname}' stream variant")
47 for sname, stream in streams.items():
48 if stream["versionProg"] == variant:
49 if stream["mediaType"] == "hls":
50 try:
51 streams = HLSStream.parse_variant_playlist(self.session, stream["url"])
52 yield from streams.items()
53 except OSError as err:
54 log.warning(f"Failed to extract HLS streams for {sname}/{stream['versionLibelle']}: {err}")
55
56 def _get_streams(self):
57 language = self.match.group('language')
58 video_id = self.match.group('video_id')
59 if video_id is None:
60 json_url = JSON_LIVE_URL.format(language)
61 else:
62 json_url = JSON_VOD_URL.format(language, video_id)
63 res = self.session.http.get(json_url)
64 video = self.session.http.json(res, schema=_video_schema)
65
66 if not video["videoJsonPlayer"]["VSR"]:
67 return
68
69 vsr = video["videoJsonPlayer"]["VSR"]
70 return self._create_stream(vsr)
71
72
73 __plugin__ = ArteTV
74
[end of src/streamlink/plugins/artetv.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/artetv.py b/src/streamlink/plugins/artetv.py
--- a/src/streamlink/plugins/artetv.py
+++ b/src/streamlink/plugins/artetv.py
@@ -1,5 +1,3 @@
-"""Plugin for Arte.tv, bi-lingual art and culture channel."""
-
import logging
import re
from operator import itemgetter
@@ -9,25 +7,6 @@
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
-JSON_VOD_URL = "https://api.arte.tv/api/player/v1/config/{0}/{1}?platform=ARTE_NEXT"
-JSON_LIVE_URL = "https://api.arte.tv/api/player/v1/livestream/{0}"
-
-_video_schema = validate.Schema({
- "videoJsonPlayer": {
- "VSR": validate.any(
- [],
- {
- validate.text: {
- "height": int,
- "mediaType": validate.text,
- "url": validate.text,
- "versionProg": int,
- "versionLibelle": validate.text
- },
- },
- )
- }
-})
@pluginmatcher(re.compile(r"""
@@ -40,34 +19,49 @@
)
""", re.VERBOSE))
class ArteTV(Plugin):
- def _create_stream(self, streams):
- variant, variantname = min([(stream["versionProg"], stream["versionLibelle"]) for stream in streams.values()],
- key=itemgetter(0))
- log.debug(f"Using the '{variantname}' stream variant")
- for sname, stream in streams.items():
- if stream["versionProg"] == variant:
- if stream["mediaType"] == "hls":
- try:
- streams = HLSStream.parse_variant_playlist(self.session, stream["url"])
- yield from streams.items()
- except OSError as err:
- log.warning(f"Failed to extract HLS streams for {sname}/{stream['versionLibelle']}: {err}")
+ API_URL = "https://api.arte.tv/api/player/v2/config/{0}/{1}"
+ API_TOKEN = "MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ"
def _get_streams(self):
- language = self.match.group('language')
- video_id = self.match.group('video_id')
- if video_id is None:
- json_url = JSON_LIVE_URL.format(language)
- else:
- json_url = JSON_VOD_URL.format(language, video_id)
- res = self.session.http.get(json_url)
- video = self.session.http.json(res, schema=_video_schema)
+ language = self.match.group("language")
+ video_id = self.match.group("video_id")
- if not video["videoJsonPlayer"]["VSR"]:
+ json_url = self.API_URL.format(language, video_id or "LIVE")
+ headers = {
+ "Authorization": f"Bearer {self.API_TOKEN}"
+ }
+ streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(
+ validate.parse_json(),
+ {"data": {"attributes": {
+ "streams": validate.any(
+ [],
+ [
+ validate.all(
+ {
+ "url": validate.url(),
+ "slot": int,
+ "protocol": validate.any("HLS", "HLS_NG"),
+ },
+ validate.union_get("slot", "protocol", "url")
+ )
+ ]
+ ),
+ "metadata": {
+ "title": str,
+ "subtitle": validate.any(None, str)
+ }
+ }}},
+ validate.get(("data", "attributes")),
+ validate.union_get("streams", "metadata")
+ ))
+
+ if not streams:
return
- vsr = video["videoJsonPlayer"]["VSR"]
- return self._create_stream(vsr)
+ self.title = f"{metadata['title']} - {metadata['subtitle']}" if metadata["subtitle"] else metadata["title"]
+
+ for slot, protocol, url in sorted(streams, key=itemgetter(0)):
+ return HLSStream.parse_variant_playlist(self.session, url)
__plugin__ = ArteTV
| {"golden_diff": "diff --git a/src/streamlink/plugins/artetv.py b/src/streamlink/plugins/artetv.py\n--- a/src/streamlink/plugins/artetv.py\n+++ b/src/streamlink/plugins/artetv.py\n@@ -1,5 +1,3 @@\n-\"\"\"Plugin for Arte.tv, bi-lingual art and culture channel.\"\"\"\n-\n import logging\n import re\n from operator import itemgetter\n@@ -9,25 +7,6 @@\n from streamlink.stream import HLSStream\n \n log = logging.getLogger(__name__)\n-JSON_VOD_URL = \"https://api.arte.tv/api/player/v1/config/{0}/{1}?platform=ARTE_NEXT\"\n-JSON_LIVE_URL = \"https://api.arte.tv/api/player/v1/livestream/{0}\"\n-\n-_video_schema = validate.Schema({\n- \"videoJsonPlayer\": {\n- \"VSR\": validate.any(\n- [],\n- {\n- validate.text: {\n- \"height\": int,\n- \"mediaType\": validate.text,\n- \"url\": validate.text,\n- \"versionProg\": int,\n- \"versionLibelle\": validate.text\n- },\n- },\n- )\n- }\n-})\n \n \n @pluginmatcher(re.compile(r\"\"\"\n@@ -40,34 +19,49 @@\n )\n \"\"\", re.VERBOSE))\n class ArteTV(Plugin):\n- def _create_stream(self, streams):\n- variant, variantname = min([(stream[\"versionProg\"], stream[\"versionLibelle\"]) for stream in streams.values()],\n- key=itemgetter(0))\n- log.debug(f\"Using the '{variantname}' stream variant\")\n- for sname, stream in streams.items():\n- if stream[\"versionProg\"] == variant:\n- if stream[\"mediaType\"] == \"hls\":\n- try:\n- streams = HLSStream.parse_variant_playlist(self.session, stream[\"url\"])\n- yield from streams.items()\n- except OSError as err:\n- log.warning(f\"Failed to extract HLS streams for {sname}/{stream['versionLibelle']}: {err}\")\n+ API_URL = \"https://api.arte.tv/api/player/v2/config/{0}/{1}\"\n+ API_TOKEN = \"MzYyZDYyYmM1Y2Q3ZWRlZWFjMmIyZjZjNTRiMGY4MzY4NzBhOWQ5YjE4MGQ1NGFiODJmOTFlZDQwN2FkOTZjMQ\"\n \n def _get_streams(self):\n- language = self.match.group('language')\n- video_id = self.match.group('video_id')\n- if video_id is None:\n- json_url = JSON_LIVE_URL.format(language)\n- else:\n- json_url = JSON_VOD_URL.format(language, video_id)\n- res = self.session.http.get(json_url)\n- video = self.session.http.json(res, schema=_video_schema)\n+ language = self.match.group(\"language\")\n+ video_id = self.match.group(\"video_id\")\n \n- if not video[\"videoJsonPlayer\"][\"VSR\"]:\n+ json_url = self.API_URL.format(language, video_id or \"LIVE\")\n+ headers = {\n+ \"Authorization\": f\"Bearer {self.API_TOKEN}\"\n+ }\n+ streams, metadata = self.session.http.get(json_url, headers=headers, schema=validate.Schema(\n+ validate.parse_json(),\n+ {\"data\": {\"attributes\": {\n+ \"streams\": validate.any(\n+ [],\n+ [\n+ validate.all(\n+ {\n+ \"url\": validate.url(),\n+ \"slot\": int,\n+ \"protocol\": validate.any(\"HLS\", \"HLS_NG\"),\n+ },\n+ validate.union_get(\"slot\", \"protocol\", \"url\")\n+ )\n+ ]\n+ ),\n+ \"metadata\": {\n+ \"title\": str,\n+ \"subtitle\": validate.any(None, str)\n+ }\n+ }}},\n+ validate.get((\"data\", \"attributes\")),\n+ validate.union_get(\"streams\", \"metadata\")\n+ ))\n+\n+ if not streams:\n return\n \n- vsr = video[\"videoJsonPlayer\"][\"VSR\"]\n- return self._create_stream(vsr)\n+ self.title = f\"{metadata['title']} - {metadata['subtitle']}\" if metadata[\"subtitle\"] else metadata[\"title\"]\n+\n+ for slot, protocol, url in sorted(streams, key=itemgetter(0)):\n+ return HLSStream.parse_variant_playlist(self.session, url)\n \n \n __plugin__ = ArteTV\n", "issue": "artetv: de/fr Livestreams aren't playable anymore\n### Checklist\n\n- [X] This is a plugin issue and not a different kind of issue\n- [X] [I have read the contribution 
guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nLatest build from the master branch\n\n### Description\n\nSince about a week the live channels aren't playable anymore. However VODs working fine.\r\n\n\n### Debug log\n\n```text\nstreamlink https://www.arte.tv/de/live/ worst -l debug\r\n[cli][debug] OS: Linux-5.14.3-arch1-1-x86_64-with-glibc2.33\r\n[cli][debug] Python: 3.9.7\r\n[cli][debug] Streamlink: 2.4.0+17.g24c59a2\r\n[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(0.59.0)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.arte.tv/de/live/\r\n[cli][debug] stream=['worst']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin artetv for URL https://www.arte.tv/de/live/\r\nerror: No playable streams found on this URL: https://www.arte.tv/de/live/\r\n\r\nstreamlink https://www.arte.tv/fr/direct/ best -l debug\r\n[cli][debug] OS: Linux-5.14.3-arch1-1-x86_64-with-glibc2.33\r\n[cli][debug] Python: 3.9.7\r\n[cli][debug] Streamlink: 2.4.0+17.g24c59a2\r\n[cli][debug] Requests(2.26.0), Socks(1.7.1), Websocket(0.59.0)\r\n[cli][debug] Arguments:\r\n[cli][debug] url=https://www.arte.tv/fr/direct/\r\n[cli][debug] stream=['best']\r\n[cli][debug] --loglevel=debug\r\n[cli][info] Found matching plugin artetv for URL https://www.arte.tv/fr/direct/\r\nerror: No playable streams found on this URL: https://www.arte.tv/fr/direct/\n```\n\nplugins.arte: switch to arte.tv v2 API\nThe Arte.tv V1 API doens't seem to work anymore for live streams (see #4026).\r\n\r\nBoth web site and mobile app use the V2 API, which requires an authentication token. 
The one from the website is used here for this fix.\n", "before_files": [{"content": "\"\"\"Plugin for Arte.tv, bi-lingual art and culture channel.\"\"\"\n\nimport logging\nimport re\nfrom operator import itemgetter\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\nlog = logging.getLogger(__name__)\nJSON_VOD_URL = \"https://api.arte.tv/api/player/v1/config/{0}/{1}?platform=ARTE_NEXT\"\nJSON_LIVE_URL = \"https://api.arte.tv/api/player/v1/livestream/{0}\"\n\n_video_schema = validate.Schema({\n \"videoJsonPlayer\": {\n \"VSR\": validate.any(\n [],\n {\n validate.text: {\n \"height\": int,\n \"mediaType\": validate.text,\n \"url\": validate.text,\n \"versionProg\": int,\n \"versionLibelle\": validate.text\n },\n },\n )\n }\n})\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:\\w+\\.)?arte\\.tv/(?:guide/)?\n (?P<language>[a-z]{2})/\n (?:\n (?:videos/)?(?P<video_id>(?!RC-|videos)[^/]+?)/.+\n |\n (?:direct|live)\n )\n\"\"\", re.VERBOSE))\nclass ArteTV(Plugin):\n def _create_stream(self, streams):\n variant, variantname = min([(stream[\"versionProg\"], stream[\"versionLibelle\"]) for stream in streams.values()],\n key=itemgetter(0))\n log.debug(f\"Using the '{variantname}' stream variant\")\n for sname, stream in streams.items():\n if stream[\"versionProg\"] == variant:\n if stream[\"mediaType\"] == \"hls\":\n try:\n streams = HLSStream.parse_variant_playlist(self.session, stream[\"url\"])\n yield from streams.items()\n except OSError as err:\n log.warning(f\"Failed to extract HLS streams for {sname}/{stream['versionLibelle']}: {err}\")\n\n def _get_streams(self):\n language = self.match.group('language')\n video_id = self.match.group('video_id')\n if video_id is None:\n json_url = JSON_LIVE_URL.format(language)\n else:\n json_url = JSON_VOD_URL.format(language, video_id)\n res = self.session.http.get(json_url)\n video = self.session.http.json(res, schema=_video_schema)\n\n if not video[\"videoJsonPlayer\"][\"VSR\"]:\n return\n\n vsr = video[\"videoJsonPlayer\"][\"VSR\"]\n return self._create_stream(vsr)\n\n\n__plugin__ = ArteTV\n", "path": "src/streamlink/plugins/artetv.py"}]} | 1,925 | 990 |
gh_patches_debug_31351 | rasdani/github-patches | git_diff | iterative__dvc-2646 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
get/import: could not perform a HEAD request
```
DVC version: 0.62.1
Python version: 3.7.3
Platform: Darwin-18.7.0-x86_64-i386-64bit
Binary: False
Cache: reflink - True, hardlink - True, symlink - True
Filesystem type (cache directory): ('apfs', '/dev/disk1s1')
Filesystem type (workspace): ('apfs', '/dev/disk1s1')
```
I'm trying to import a directory versioned in our own [dataset registry](https://github.com/iterative/dataset-registry) project into an empty, non-Git DVC project, but getting this cryptic error:
```console
$ dvc import --rev 0547f58 \
[email protected]:iterative/dataset-registry.git \
use-cases/data
Importing 'use-cases/data ([email protected]:iterative/dataset-registry.git)' -> 'data'
ERROR: failed to import 'use-cases/data' from '[email protected]:iterative/dataset-registry.git'. - unable to find DVC-file with output '../../../../private/var/folders/_c/3mt_xn_d4xl2ddsx2m98h_r40000gn/T/tmphs83czecdvc-repo/use-cases/data'
Having any troubles? Hit us up at https://dvc.org/support, we are always happy to help!
```
The directory in question has file name `b6923e1e4ad16ea1a7e2b328842d56a2.dir` (See [use-cases/cats-dogs.dvc](https://github.com/iterative/dataset-registry/blob/0547f58/use-cases/cats-dogs.dvc) of that version). And the default remote is [configured](https://github.com/iterative/dataset-registry/blob/master/.dvc/config) to https://remote.dvc.org/dataset-registry (which is an HTTP redirect to the s3://dvc-public/remote/dataset-registry bucket). ~~The file seems to be in the remote~~
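
For what it's worth, the remote can be probed by hand with a plain HEAD request (rough sketch; the `b6/<rest>` cache-path layout and the exact URL are assumptions, not verified):

```python
# Manually check whether the HTTP remote answers a HEAD request for that object.
# Assumes DVC's usual <first-two-hex-chars>/<rest-of-checksum> layout under the remote URL.
import requests

url = "https://remote.dvc.org/dataset-registry/b6/923e1e4ad16ea1a7e2b328842d56a2.dir"
resp = requests.head(url, allow_redirects=True, timeout=10)
print(resp.status_code)
print(resp.headers.get("ETag"), resp.headers.get("Content-MD5"))
```
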
Am I just doing something wrong here (hopefully), or is `dvc import` broken?
</issue>
<code>
[start of dvc/remote/http.py]
1 from __future__ import unicode_literals
2
3 import logging
4 from dvc.scheme import Schemes
5 from dvc.utils.compat import open
6
7 from dvc.progress import Tqdm
8 from dvc.exceptions import DvcException
9 from dvc.config import Config, ConfigError
10 from dvc.remote.base import RemoteBASE
11
12 logger = logging.getLogger(__name__)
13
14
15 class RemoteHTTP(RemoteBASE):
16 scheme = Schemes.HTTP
17 REQUEST_TIMEOUT = 10
18 CHUNK_SIZE = 2 ** 16
19 PARAM_CHECKSUM = "etag"
20
21 def __init__(self, repo, config):
22 super(RemoteHTTP, self).__init__(repo, config)
23
24 url = config.get(Config.SECTION_REMOTE_URL)
25 self.path_info = self.path_cls(url) if url else None
26
27 if not self.no_traverse:
28 raise ConfigError(
29 "HTTP doesn't support traversing the remote to list existing "
30 "files. Use: `dvc remote modify <name> no_traverse true`"
31 )
32
33 def _download(self, from_info, to_file, name=None, no_progress_bar=False):
34 request = self._request("GET", from_info.url, stream=True)
35 with Tqdm(
36 total=None if no_progress_bar else self._content_length(from_info),
37 leave=False,
38 bytes=True,
39 desc=from_info.url if name is None else name,
40 disable=no_progress_bar,
41 ) as pbar:
42 with open(to_file, "wb") as fd:
43 for chunk in request.iter_content(chunk_size=self.CHUNK_SIZE):
44 fd.write(chunk)
45 fd.flush()
46 pbar.update(len(chunk))
47
48 def exists(self, path_info):
49 return bool(self._request("HEAD", path_info.url))
50
51 def _content_length(self, url_or_request):
52 headers = getattr(
53 url_or_request,
54 "headers",
55 self._request("HEAD", url_or_request).headers,
56 )
57 res = headers.get("Content-Length")
58 return int(res) if res else None
59
60 def get_file_checksum(self, path_info):
61 url = path_info.url
62 headers = self._request("HEAD", url).headers
63 etag = headers.get("ETag") or headers.get("Content-MD5")
64
65 if not etag:
66 raise DvcException(
67 "could not find an ETag or "
68 "Content-MD5 header for '{url}'".format(url=url)
69 )
70
71 if etag.startswith("W/"):
72 raise DvcException(
73 "Weak ETags are not supported."
74 " (Etag: '{etag}', URL: '{url}')".format(etag=etag, url=url)
75 )
76
77 return etag
78
79 def _request(self, method, url, **kwargs):
80 import requests
81
82 kwargs.setdefault("allow_redirects", True)
83 kwargs.setdefault("timeout", self.REQUEST_TIMEOUT)
84
85 try:
86 return requests.request(method, url, **kwargs)
87 except requests.exceptions.RequestException:
88 raise DvcException("could not perform a {} request".format(method))
89
90 def gc(self):
91 raise NotImplementedError
92
[end of dvc/remote/http.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/dvc/remote/http.py b/dvc/remote/http.py
--- a/dvc/remote/http.py
+++ b/dvc/remote/http.py
@@ -1,9 +1,11 @@
from __future__ import unicode_literals
import logging
+
+from funcy import cached_property
+
from dvc.scheme import Schemes
from dvc.utils.compat import open
-
from dvc.progress import Tqdm
from dvc.exceptions import DvcException
from dvc.config import Config, ConfigError
@@ -14,6 +16,8 @@
class RemoteHTTP(RemoteBASE):
scheme = Schemes.HTTP
+ SESSION_RETRIES = 5
+ SESSION_BACKOFF_FACTOR = 0.1
REQUEST_TIMEOUT = 10
CHUNK_SIZE = 2 ** 16
PARAM_CHECKSUM = "etag"
@@ -76,6 +80,24 @@
return etag
+ @cached_property
+ def _session(self):
+ import requests
+ from requests.adapters import HTTPAdapter
+ from urllib3.util.retry import Retry
+
+ session = requests.Session()
+
+ retries = Retry(
+ total=self.SESSION_RETRIES,
+ backoff_factor=self.SESSION_BACKOFF_FACTOR,
+ )
+
+ session.mount("http://", HTTPAdapter(max_retries=retries))
+ session.mount("https://", HTTPAdapter(max_retries=retries))
+
+ return session
+
def _request(self, method, url, **kwargs):
import requests
@@ -83,7 +105,7 @@
kwargs.setdefault("timeout", self.REQUEST_TIMEOUT)
try:
- return requests.request(method, url, **kwargs)
+ return self._session.request(method, url, **kwargs)
except requests.exceptions.RequestException:
raise DvcException("could not perform a {} request".format(method))
| {"golden_diff": "diff --git a/dvc/remote/http.py b/dvc/remote/http.py\n--- a/dvc/remote/http.py\n+++ b/dvc/remote/http.py\n@@ -1,9 +1,11 @@\n from __future__ import unicode_literals\n \n import logging\n+\n+from funcy import cached_property\n+\n from dvc.scheme import Schemes\n from dvc.utils.compat import open\n-\n from dvc.progress import Tqdm\n from dvc.exceptions import DvcException\n from dvc.config import Config, ConfigError\n@@ -14,6 +16,8 @@\n \n class RemoteHTTP(RemoteBASE):\n scheme = Schemes.HTTP\n+ SESSION_RETRIES = 5\n+ SESSION_BACKOFF_FACTOR = 0.1\n REQUEST_TIMEOUT = 10\n CHUNK_SIZE = 2 ** 16\n PARAM_CHECKSUM = \"etag\"\n@@ -76,6 +80,24 @@\n \n return etag\n \n+ @cached_property\n+ def _session(self):\n+ import requests\n+ from requests.adapters import HTTPAdapter\n+ from urllib3.util.retry import Retry\n+\n+ session = requests.Session()\n+\n+ retries = Retry(\n+ total=self.SESSION_RETRIES,\n+ backoff_factor=self.SESSION_BACKOFF_FACTOR,\n+ )\n+\n+ session.mount(\"http://\", HTTPAdapter(max_retries=retries))\n+ session.mount(\"https://\", HTTPAdapter(max_retries=retries))\n+\n+ return session\n+\n def _request(self, method, url, **kwargs):\n import requests\n \n@@ -83,7 +105,7 @@\n kwargs.setdefault(\"timeout\", self.REQUEST_TIMEOUT)\n \n try:\n- return requests.request(method, url, **kwargs)\n+ return self._session.request(method, url, **kwargs)\n except requests.exceptions.RequestException:\n raise DvcException(\"could not perform a {} request\".format(method))\n", "issue": "get/import: could not perform a HEAD request\n```\r\nDVC version: 0.62.1\r\nPython version: 3.7.3\r\nPlatform: Darwin-18.7.0-x86_64-i386-64bit\r\nBinary: False\r\nCache: reflink - True, hardlink - True, symlink - True\r\nFilesystem type (cache directory): ('apfs', '/dev/disk1s1')\r\nFilesystem type (workspace): ('apfs', '/dev/disk1s1')\r\n```\r\n\r\nI'm trying to import a directory versioned in our own [dataset registry](https://github.com/iterative/dataset-registry) project into an empty, non-Git DVC project, but getting this cryptic error:\r\n\r\n```console\r\n$ dvc import --rev 0547f58 \\ \r\n [email protected]:iterative/dataset-registry.git \\\r\n use-cases/data\r\nImporting 'use-cases/data ([email protected]:iterative/dataset-registry.git)' -> 'data'\r\nERROR: failed to import 'use-cases/data' from '[email protected]:iterative/dataset-registry.git'. - unable to find DVC-file with output '../../../../private/var/folders/_c/3mt_xn_d4xl2ddsx2m98h_r40000gn/T/tmphs83czecdvc-repo/use-cases/data'\r\n\r\nHaving any troubles? Hit us up at https://dvc.org/support, we are always happy to help!\r\n```\r\n\r\nThe directory in question has file name `b6923e1e4ad16ea1a7e2b328842d56a2.dir ` (See [use-cases/cats-dogs.dvc](https://github.com/iterative/dataset-registry/blob/0547f58/use-cases/cats-dogs.dvc) of that version). And the default remote is [configured[(https://github.com/iterative/dataset-registry/blob/master/.dvc/config) to https://remote.dvc.org/dataset-registry (which is an HTTP redirect to the s3://dvc-public/remote/dataset-registry bucket). 
~~The file seems to be in the remote~~\r\n\r\nAm I just doing something wrong here (hopefully), or is `dvc import` broken?\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nfrom dvc.scheme import Schemes\nfrom dvc.utils.compat import open\n\nfrom dvc.progress import Tqdm\nfrom dvc.exceptions import DvcException\nfrom dvc.config import Config, ConfigError\nfrom dvc.remote.base import RemoteBASE\n\nlogger = logging.getLogger(__name__)\n\n\nclass RemoteHTTP(RemoteBASE):\n scheme = Schemes.HTTP\n REQUEST_TIMEOUT = 10\n CHUNK_SIZE = 2 ** 16\n PARAM_CHECKSUM = \"etag\"\n\n def __init__(self, repo, config):\n super(RemoteHTTP, self).__init__(repo, config)\n\n url = config.get(Config.SECTION_REMOTE_URL)\n self.path_info = self.path_cls(url) if url else None\n\n if not self.no_traverse:\n raise ConfigError(\n \"HTTP doesn't support traversing the remote to list existing \"\n \"files. Use: `dvc remote modify <name> no_traverse true`\"\n )\n\n def _download(self, from_info, to_file, name=None, no_progress_bar=False):\n request = self._request(\"GET\", from_info.url, stream=True)\n with Tqdm(\n total=None if no_progress_bar else self._content_length(from_info),\n leave=False,\n bytes=True,\n desc=from_info.url if name is None else name,\n disable=no_progress_bar,\n ) as pbar:\n with open(to_file, \"wb\") as fd:\n for chunk in request.iter_content(chunk_size=self.CHUNK_SIZE):\n fd.write(chunk)\n fd.flush()\n pbar.update(len(chunk))\n\n def exists(self, path_info):\n return bool(self._request(\"HEAD\", path_info.url))\n\n def _content_length(self, url_or_request):\n headers = getattr(\n url_or_request,\n \"headers\",\n self._request(\"HEAD\", url_or_request).headers,\n )\n res = headers.get(\"Content-Length\")\n return int(res) if res else None\n\n def get_file_checksum(self, path_info):\n url = path_info.url\n headers = self._request(\"HEAD\", url).headers\n etag = headers.get(\"ETag\") or headers.get(\"Content-MD5\")\n\n if not etag:\n raise DvcException(\n \"could not find an ETag or \"\n \"Content-MD5 header for '{url}'\".format(url=url)\n )\n\n if etag.startswith(\"W/\"):\n raise DvcException(\n \"Weak ETags are not supported.\"\n \" (Etag: '{etag}', URL: '{url}')\".format(etag=etag, url=url)\n )\n\n return etag\n\n def _request(self, method, url, **kwargs):\n import requests\n\n kwargs.setdefault(\"allow_redirects\", True)\n kwargs.setdefault(\"timeout\", self.REQUEST_TIMEOUT)\n\n try:\n return requests.request(method, url, **kwargs)\n except requests.exceptions.RequestException:\n raise DvcException(\"could not perform a {} request\".format(method))\n\n def gc(self):\n raise NotImplementedError\n", "path": "dvc/remote/http.py"}]} | 1,895 | 417 |
gh_patches_debug_17448 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2064 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Block Google from Indexing our Development Environments
## Test plan
GIVEN all dev environments (Test / UAT)
WHEN looking at the _head_ tag
THEN a _meta name="robots" content="none"_ node should be added
GIVEN the live environment
WHEN looking at the _head_ tag
THEN a _meta name="robots" content="none"_ node should not be added
## Issue description
We should add a robots.txt to all NON LIVE machines that prevents Google from indexing the site and displaying the content in search results.
This looks to be pretty simple: https://support.google.com/webmasters/answer/156449?rd=1
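
One minimal way to wire up the meta-tag variant from the test plan could look roughly like this (sketch only; the function name is illustrative, it assumes the flag is exposed through a Django context processor, and the real "non-live" condition may differ):

```python
# Sketch: expose a debug/non-live flag to templates via a context processor, so the
# base template can emit the tag only on non-live machines, e.g.
# {% if debug %}<meta name="robots" content="none">{% endif %} inside <head>.
from django.conf import settings

def robots_context(request):
    return {"debug": getattr(settings, "DEBUG", False)}
```
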
</issue>
<code>
[start of akvo/rsr/context_processors.py]
1 # -*- coding: utf-8 -*-
2 """
3 Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the
6 Akvo RSR module. For additional details on the GNU license please see
7 < http://www.gnu.org/licenses/agpl.html >.
8 """
9
10 import django
11
12 from django.conf import settings
13 from django.core.exceptions import DisallowedHost
14 from django.contrib.sites.models import get_current_site
15
16
17 def extra_context(request, protocol="http"):
18 """Add information to the request context."""
19 try:
20 current_site = get_current_site(request)
21 except DisallowedHost:
22 current_site = None
23
24 django_version = django.get_version()
25 deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')
26 deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')
27 deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')
28 deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')
29
30 return dict(
31 current_site=current_site,
32 django_version=django_version,
33 deploy_tag=deploy_tag,
34 deploy_branch=deploy_branch,
35 deploy_commit_id=deploy_commit_id,
36 deploy_commit_full_id=deploy_commit_full_id
37 )
38
39
40 def get_current_path_without_lang(request):
41 """Return current path without lang."""
42 path = request.get_full_path()
43 path_bits = path.split('/')
44 path = '/'.join(path_bits[2:])
45 return {'current_path_without_lang': path}
46
47
48 def extra_pages_context(request):
49 """Add context information of an RSR Page."""
50 if request.rsr_page:
51 page = request.rsr_page
52 return {
53 'rsr_page': page,
54 'favicon': page.favicon,
55 'logo': page.logo,
56 'organisation': page.organisation,
57 'return_url': page.return_url,
58 'return_url_text': page.custom_return_url_text,
59 'stylesheet': page.stylesheet,
60 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),
61 'domain_url': '//{}'.format(settings.RSR_DOMAIN),
62 'no_facebook': not page.facebook_button,
63 'facebook_app_id': page.facebook_app_id,
64 'no_twitter': not page.twitter_button,
65 }
66
67 return {}
68
[end of akvo/rsr/context_processors.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py
--- a/akvo/rsr/context_processors.py
+++ b/akvo/rsr/context_processors.py
@@ -22,6 +22,7 @@
current_site = None
django_version = django.get_version()
+ debug = getattr(settings, 'DEBUG', False)
deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')
deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')
deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')
@@ -30,6 +31,7 @@
return dict(
current_site=current_site,
django_version=django_version,
+ debug=debug,
deploy_tag=deploy_tag,
deploy_branch=deploy_branch,
deploy_commit_id=deploy_commit_id,
| {"golden_diff": "diff --git a/akvo/rsr/context_processors.py b/akvo/rsr/context_processors.py\n--- a/akvo/rsr/context_processors.py\n+++ b/akvo/rsr/context_processors.py\n@@ -22,6 +22,7 @@\n current_site = None\n \n django_version = django.get_version()\n+ debug = getattr(settings, 'DEBUG', False)\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n@@ -30,6 +31,7 @@\n return dict(\n current_site=current_site,\n django_version=django_version,\n+ debug=debug,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n", "issue": "Block Google from Indexing our Development Environments\n## Test plan\n\nGIVEN all dev environments (Test / UAT)\nWHEN looking at the _head_ tag\nTHEN a _meta name=\"robots\" content=\"none\"_ node should be added\n\nGIVEN the live environment\nWHEN looking at the _head_ tag\nTHEN a _meta name=\"robots\" content=\"none\"_ node should not be added\n## Issue description\n\nWe should add a robots.txt to all NON LIVE machines that prevents Google from indexing the site and displaying the content in search results.\n\nThis looks to be pretty simple: https://support.google.com/webmasters/answer/156449?rd=1\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nAkvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. For additional details on the GNU license please see\n< http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nimport django\n\nfrom django.conf import settings\nfrom django.core.exceptions import DisallowedHost\nfrom django.contrib.sites.models import get_current_site\n\n\ndef extra_context(request, protocol=\"http\"):\n \"\"\"Add information to the request context.\"\"\"\n try:\n current_site = get_current_site(request)\n except DisallowedHost:\n current_site = None\n\n django_version = django.get_version()\n deploy_tag = getattr(settings, 'DEPLOY_TAG', 'Unknown')\n deploy_branch = getattr(settings, 'DEPLOY_BRANCH', 'Unknown')\n deploy_commit_id = getattr(settings, 'DEPLOY_COMMIT_ID', 'Unknown')\n deploy_commit_full_id = getattr(settings, 'DEPLOY_COMMIT_FULL_ID', 'Unknown')\n\n return dict(\n current_site=current_site,\n django_version=django_version,\n deploy_tag=deploy_tag,\n deploy_branch=deploy_branch,\n deploy_commit_id=deploy_commit_id,\n deploy_commit_full_id=deploy_commit_full_id\n )\n\n\ndef get_current_path_without_lang(request):\n \"\"\"Return current path without lang.\"\"\"\n path = request.get_full_path()\n path_bits = path.split('/')\n path = '/'.join(path_bits[2:])\n return {'current_path_without_lang': path}\n\n\ndef extra_pages_context(request):\n \"\"\"Add context information of an RSR Page.\"\"\"\n if request.rsr_page:\n page = request.rsr_page\n return {\n 'rsr_page': page,\n 'favicon': page.favicon,\n 'logo': page.logo,\n 'organisation': page.organisation,\n 'return_url': page.return_url,\n 'return_url_text': page.custom_return_url_text,\n 'stylesheet': page.stylesheet,\n 'akvoapp_root_url': '//{}'.format(settings.AKVOAPP_DOMAIN),\n 'domain_url': '//{}'.format(settings.RSR_DOMAIN),\n 'no_facebook': not page.facebook_button,\n 'facebook_app_id': page.facebook_app_id,\n 'no_twitter': not page.twitter_button,\n }\n\n return {}\n", "path": "akvo/rsr/context_processors.py"}]} | 1,302 | 187 |
gh_patches_debug_13551 | rasdani/github-patches | git_diff | vyperlang__vyper-3287 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`FunctionNodeVisitor` visits the `sqrt` body twice
### Version Information
* vyper Version (output of `vyper --version`): 0.3.8+commit.d76c6ed2
* OS: OSX
* Python Version (output of `python --version`): 3.8.0
### What's your issue about?
The `FunctionNodeVisitor` seems to visit the body of the `sqrt` builtin twice: the first time in the `__init__` function of the `FunctionNodeVisitor`, and the second time after its creation, via a `for` loop over its body.
https://github.com/vyperlang/vyper/blob/187ab0eec8efbe19ed5046e4e947249e9d43141c/vyper/builtins/_utils.py#L28-L30
https://github.com/vyperlang/vyper/blob/187ab0eec8efbe19ed5046e4e947249e9d43141c/vyper/semantics/analysis/local.py#L178-L179
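
A tiny self-contained analogue of the double traversal (illustrative names only, not vyper's actual classes):

```python
# Minimal analogue: the constructor walks the body once, the call site walks it again.
class Visitor:
    def __init__(self, body):
        self.seen = []
        for node in body:      # first pass, done inside __init__
            self.visit(node)

    def visit(self, node):
        self.seen.append(node)

body = ["stmt1", "stmt2"]
v = Visitor(body)
for node in body:              # second pass, done by the caller
    v.visit(node)

print(v.seen)  # ['stmt1', 'stmt2', 'stmt1', 'stmt2'] - every statement visited twice
```
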
</issue>
<code>
[start of vyper/builtins/_utils.py]
1 from vyper.ast import parse_to_ast
2 from vyper.codegen.context import Context
3 from vyper.codegen.global_context import GlobalContext
4 from vyper.codegen.stmt import parse_body
5 from vyper.semantics.analysis.local import FunctionNodeVisitor
6 from vyper.semantics.namespace import Namespace, override_global_namespace
7 from vyper.semantics.types.function import ContractFunctionT, FunctionVisibility, StateMutability
8
9
10 def _strip_source_pos(ir_node):
11 ir_node.source_pos = None
12 for x in ir_node.args:
13 _strip_source_pos(x)
14
15
16 def generate_inline_function(code, variables, variables_2, memory_allocator):
17 ast_code = parse_to_ast(code, add_fn_node="dummy_fn")
18 # Annotate the AST with a temporary old (i.e. typecheck) namespace
19 namespace = Namespace()
20 namespace.update(variables_2)
21 with override_global_namespace(namespace):
22 # Initialise a placeholder `FunctionDef` AST node and corresponding
23 # `ContractFunctionT` type to rely on the annotation visitors in semantics
24 # module.
25 ast_code.body[0]._metadata["type"] = ContractFunctionT(
26 "sqrt_builtin", {}, 0, 0, None, FunctionVisibility.INTERNAL, StateMutability.NONPAYABLE
27 )
28 sv = FunctionNodeVisitor(ast_code, ast_code.body[0], namespace)
29 for n in ast_code.body[0].body:
30 sv.visit(n)
31
32 new_context = Context(
33 vars_=variables, global_ctx=GlobalContext(), memory_allocator=memory_allocator
34 )
35 generated_ir = parse_body(ast_code.body[0].body, new_context)
36 # strip source position info from the generated_ir since
37 # it doesn't make any sense (e.g. the line numbers will start from 0
38 # instead of where we are in the code)
39 # NOTE if we ever use this for inlining user-code, it would make
40 # sense to fix the offsets of the source positions in the generated
41 # code instead of stripping them.
42 _strip_source_pos(generated_ir)
43 return new_context, generated_ir
44
[end of vyper/builtins/_utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vyper/builtins/_utils.py b/vyper/builtins/_utils.py
--- a/vyper/builtins/_utils.py
+++ b/vyper/builtins/_utils.py
@@ -25,9 +25,9 @@
ast_code.body[0]._metadata["type"] = ContractFunctionT(
"sqrt_builtin", {}, 0, 0, None, FunctionVisibility.INTERNAL, StateMutability.NONPAYABLE
)
- sv = FunctionNodeVisitor(ast_code, ast_code.body[0], namespace)
- for n in ast_code.body[0].body:
- sv.visit(n)
+ # The FunctionNodeVisitor's constructor performs semantic checks
+ # annotate the AST as side effects.
+ FunctionNodeVisitor(ast_code, ast_code.body[0], namespace)
new_context = Context(
vars_=variables, global_ctx=GlobalContext(), memory_allocator=memory_allocator
| {"golden_diff": "diff --git a/vyper/builtins/_utils.py b/vyper/builtins/_utils.py\n--- a/vyper/builtins/_utils.py\n+++ b/vyper/builtins/_utils.py\n@@ -25,9 +25,9 @@\n ast_code.body[0]._metadata[\"type\"] = ContractFunctionT(\n \"sqrt_builtin\", {}, 0, 0, None, FunctionVisibility.INTERNAL, StateMutability.NONPAYABLE\n )\n- sv = FunctionNodeVisitor(ast_code, ast_code.body[0], namespace)\n- for n in ast_code.body[0].body:\n- sv.visit(n)\n+ # The FunctionNodeVisitor's constructor performs semantic checks\n+ # annotate the AST as side effects.\n+ FunctionNodeVisitor(ast_code, ast_code.body[0], namespace)\n \n new_context = Context(\n vars_=variables, global_ctx=GlobalContext(), memory_allocator=memory_allocator\n", "issue": "`FunctionNodeVisitor` visits twice `sqrt` body\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.3.8+commit.d76c6ed2\r\n* OS: OSX\r\n* Python Version (output of `python --version`): 3.8.0\r\n\r\n### What's your issue about?\r\n\r\nThe `FunctionNodeVisitor` seems to visit twice the body of `sqrt` builtin, the first time is in the `__init__` function of the `FunctionNodeVisitor` and the second time after its creation using a `for` loop over its body.\r\nhttps://github.com/vyperlang/vyper/blob/187ab0eec8efbe19ed5046e4e947249e9d43141c/vyper/builtins/_utils.py#L28-L30\r\n\r\nhttps://github.com/vyperlang/vyper/blob/187ab0eec8efbe19ed5046e4e947249e9d43141c/vyper/semantics/analysis/local.py#L178-L179\r\n\n", "before_files": [{"content": "from vyper.ast import parse_to_ast\nfrom vyper.codegen.context import Context\nfrom vyper.codegen.global_context import GlobalContext\nfrom vyper.codegen.stmt import parse_body\nfrom vyper.semantics.analysis.local import FunctionNodeVisitor\nfrom vyper.semantics.namespace import Namespace, override_global_namespace\nfrom vyper.semantics.types.function import ContractFunctionT, FunctionVisibility, StateMutability\n\n\ndef _strip_source_pos(ir_node):\n ir_node.source_pos = None\n for x in ir_node.args:\n _strip_source_pos(x)\n\n\ndef generate_inline_function(code, variables, variables_2, memory_allocator):\n ast_code = parse_to_ast(code, add_fn_node=\"dummy_fn\")\n # Annotate the AST with a temporary old (i.e. typecheck) namespace\n namespace = Namespace()\n namespace.update(variables_2)\n with override_global_namespace(namespace):\n # Initialise a placeholder `FunctionDef` AST node and corresponding\n # `ContractFunctionT` type to rely on the annotation visitors in semantics\n # module.\n ast_code.body[0]._metadata[\"type\"] = ContractFunctionT(\n \"sqrt_builtin\", {}, 0, 0, None, FunctionVisibility.INTERNAL, StateMutability.NONPAYABLE\n )\n sv = FunctionNodeVisitor(ast_code, ast_code.body[0], namespace)\n for n in ast_code.body[0].body:\n sv.visit(n)\n\n new_context = Context(\n vars_=variables, global_ctx=GlobalContext(), memory_allocator=memory_allocator\n )\n generated_ir = parse_body(ast_code.body[0].body, new_context)\n # strip source position info from the generated_ir since\n # it doesn't make any sense (e.g. the line numbers will start from 0\n # instead of where we are in the code)\n # NOTE if we ever use this for inlining user-code, it would make\n # sense to fix the offsets of the source positions in the generated\n # code instead of stripping them.\n _strip_source_pos(generated_ir)\n return new_context, generated_ir\n", "path": "vyper/builtins/_utils.py"}]} | 1,326 | 200 |
gh_patches_debug_22916 | rasdani/github-patches | git_diff | quantumlib__Cirq-1018 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add `_phase_by_` magic method to `ControlledGate`
Comes after https://github.com/quantumlib/Cirq/issues/947
The logic is as follows: if the qubit index is 0 (the control), the operation is returned unchanged. If it is larger, then we delegate to phasing the sub gate with `cirq.phase_by` and a default result of NotImplemented. If it's NotImplemented, we return NotImplemented. Otherwise we return a controlled gate with the phased sub gate.
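
Spelled out as code, the method on `ControlledGate` could look roughly like this (a sketch that assumes the module's existing `protocols` import; the index shift and the exact default are my reading of the description above, not a settled implementation):

```python
def _phase_by_(self, phase_turns: float, qubit_index: int):
    if qubit_index == 0:
        # Phasing the control qubit leaves the controlled gate unchanged.
        return self
    # The sub gate never sees the control qubit, hence the shift by one.
    phased = protocols.phase_by(
        self.sub_gate, phase_turns, qubit_index - 1, default=NotImplemented)
    if phased is NotImplemented:
        return NotImplemented
    return ControlledGate(phased)
```
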
</issue>
<code>
[start of cirq/ops/controlled_gate.py]
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Union, Sequence, Any
16
17 import numpy as np
18
19 from cirq import linalg, protocols
20 from cirq.ops import raw_types
21 from cirq.type_workarounds import NotImplementedType
22
23
24 class ControlledGate(raw_types.Gate):
25 """Augments existing gates with a control qubit."""
26
27 def __init__(self, sub_gate: raw_types.Gate) -> None:
28 """Initializes the controlled gate.
29
30 Args:
31 sub_gate: The gate to add a control qubit to.
32 default_extensions: The extensions method that should be used when
33 determining if the controlled gate supports certain gate
34 features. For example, if this extensions instance is able to
35 cast sub_gate to a ExtrapolatableEffect then the controlled gate
36 can also be cast to a ExtrapolatableEffect. When this value is
37 None, an empty extensions instance is used instead.
38 """
39 self.sub_gate = sub_gate
40
41 def validate_args(self, qubits) -> None:
42 if len(qubits) < 1:
43 raise ValueError('No control qubit specified.')
44 self.sub_gate.validate_args(qubits[1:])
45
46 def __eq__(self, other):
47 if not isinstance(other, type(self)):
48 return NotImplemented
49 return self.sub_gate == other.sub_gate
50
51 def __ne__(self, other):
52 return not self == other
53
54 def __hash__(self):
55 return hash((ControlledGate, self.sub_gate))
56
57 def _apply_unitary_to_tensor_(self,
58 target_tensor: np.ndarray,
59 available_buffer: np.ndarray,
60 axes: Sequence[int],
61 ) -> np.ndarray:
62 control = axes[0]
63 rest = axes[1:]
64 active = linalg.slice_for_qubits_equal_to([control], 1)
65 sub_axes = [r - int(r > control) for r in rest]
66 target_view = target_tensor[active]
67 buffer_view = available_buffer[active]
68 result = protocols.apply_unitary_to_tensor(
69 self.sub_gate,
70 target_view,
71 buffer_view,
72 sub_axes,
73 default=NotImplemented)
74
75 if result is NotImplemented:
76 return NotImplemented
77
78 if result is target_view:
79 return target_tensor
80
81 if result is buffer_view:
82 inactive = linalg.slice_for_qubits_equal_to([control], 0)
83 available_buffer[inactive] = target_tensor[inactive]
84 return available_buffer
85
86 # HACK: assume they didn't somehow escape the slice view and edit the
87 # rest of target_tensor.
88 target_tensor[active] = result
89 return target_tensor
90
91 def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:
92 sub_matrix = protocols.unitary(self.sub_gate, None)
93 if sub_matrix is None:
94 return NotImplemented
95 return linalg.block_diag(np.eye(sub_matrix.shape[0]), sub_matrix)
96
97 def __pow__(self, exponent: Any) -> 'ControlledGate':
98 new_sub_gate = protocols.pow(self.sub_gate,
99 exponent,
100 NotImplemented)
101 if new_sub_gate is NotImplemented:
102 return NotImplemented
103 return ControlledGate(new_sub_gate)
104
105 def _is_parameterized_(self):
106 return protocols.is_parameterized(self.sub_gate)
107
108 def _resolve_parameters_(self, param_resolver):
109 new_sub_gate = protocols.resolve_parameters(self.sub_gate,
110 param_resolver)
111 return ControlledGate(new_sub_gate)
112
113 def _trace_distance_bound_(self):
114 return protocols.trace_distance_bound(self.sub_gate)
115
116 def _circuit_diagram_info_(self,
117 args: protocols.CircuitDiagramInfoArgs
118 ) -> protocols.CircuitDiagramInfo:
119 sub_info = protocols.circuit_diagram_info(self.sub_gate, args, None)
120 if sub_info is None:
121 return NotImplemented
122 return protocols.CircuitDiagramInfo(
123 wire_symbols=('@',) + sub_info.wire_symbols,
124 exponent=sub_info.exponent)
125
126 def __str__(self):
127 return 'C' + str(self.sub_gate)
128
129 def __repr__(self):
130 return 'cirq.ControlledGate(sub_gate={!r})'.format(self.sub_gate)
131
[end of cirq/ops/controlled_gate.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/cirq/ops/controlled_gate.py b/cirq/ops/controlled_gate.py
--- a/cirq/ops/controlled_gate.py
+++ b/cirq/ops/controlled_gate.py
@@ -29,12 +29,6 @@
Args:
sub_gate: The gate to add a control qubit to.
- default_extensions: The extensions method that should be used when
- determining if the controlled gate supports certain gate
- features. For example, if this extensions instance is able to
- cast sub_gate to a ExtrapolatableEffect then the controlled gate
- can also be cast to a ExtrapolatableEffect. When this value is
- None, an empty extensions instance is used instead.
"""
self.sub_gate = sub_gate
@@ -102,6 +96,15 @@
return NotImplemented
return ControlledGate(new_sub_gate)
+ def _phase_by_(self, phase_turns: float, qubit_index: int):
+ if qubit_index == 0:
+ return self
+ phased_gate = protocols.phase_by(
+ self.sub_gate, phase_turns, qubit_index-1, None)
+ if phased_gate is None:
+ return NotImplemented
+ return ControlledGate(phased_gate)
+
def _is_parameterized_(self):
return protocols.is_parameterized(self.sub_gate)
| {"golden_diff": "diff --git a/cirq/ops/controlled_gate.py b/cirq/ops/controlled_gate.py\n--- a/cirq/ops/controlled_gate.py\n+++ b/cirq/ops/controlled_gate.py\n@@ -29,12 +29,6 @@\n \n Args:\n sub_gate: The gate to add a control qubit to.\n- default_extensions: The extensions method that should be used when\n- determining if the controlled gate supports certain gate\n- features. For example, if this extensions instance is able to\n- cast sub_gate to a ExtrapolatableEffect then the controlled gate\n- can also be cast to a ExtrapolatableEffect. When this value is\n- None, an empty extensions instance is used instead.\n \"\"\"\n self.sub_gate = sub_gate\n \n@@ -102,6 +96,15 @@\n return NotImplemented\n return ControlledGate(new_sub_gate)\n \n+ def _phase_by_(self, phase_turns: float, qubit_index: int):\n+ if qubit_index == 0:\n+ return self\n+ phased_gate = protocols.phase_by(\n+ self.sub_gate, phase_turns, qubit_index-1, None)\n+ if phased_gate is None:\n+ return NotImplemented\n+ return ControlledGate(phased_gate)\n+\n def _is_parameterized_(self):\n return protocols.is_parameterized(self.sub_gate)\n", "issue": "Add `_phase_by_` magic method to `ControlledGate`\nComes after https://github.com/quantumlib/Cirq/issues/947\r\n\r\nThe logic is as follows: if the qubit index is 0 (the control), the operation is returned unchanged. If it is larger then we delegate to phasing the sub gate with `cirq.phase_by` and a default result of NotImplemented. If it's NotImplemented, we return NotImplemented. Otherwise we return a controlled gate with the phased sub gate.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Union, Sequence, Any\n\nimport numpy as np\n\nfrom cirq import linalg, protocols\nfrom cirq.ops import raw_types\nfrom cirq.type_workarounds import NotImplementedType\n\n\nclass ControlledGate(raw_types.Gate):\n \"\"\"Augments existing gates with a control qubit.\"\"\"\n\n def __init__(self, sub_gate: raw_types.Gate) -> None:\n \"\"\"Initializes the controlled gate.\n\n Args:\n sub_gate: The gate to add a control qubit to.\n default_extensions: The extensions method that should be used when\n determining if the controlled gate supports certain gate\n features. For example, if this extensions instance is able to\n cast sub_gate to a ExtrapolatableEffect then the controlled gate\n can also be cast to a ExtrapolatableEffect. 
When this value is\n None, an empty extensions instance is used instead.\n \"\"\"\n self.sub_gate = sub_gate\n\n def validate_args(self, qubits) -> None:\n if len(qubits) < 1:\n raise ValueError('No control qubit specified.')\n self.sub_gate.validate_args(qubits[1:])\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return self.sub_gate == other.sub_gate\n\n def __ne__(self, other):\n return not self == other\n\n def __hash__(self):\n return hash((ControlledGate, self.sub_gate))\n\n def _apply_unitary_to_tensor_(self,\n target_tensor: np.ndarray,\n available_buffer: np.ndarray,\n axes: Sequence[int],\n ) -> np.ndarray:\n control = axes[0]\n rest = axes[1:]\n active = linalg.slice_for_qubits_equal_to([control], 1)\n sub_axes = [r - int(r > control) for r in rest]\n target_view = target_tensor[active]\n buffer_view = available_buffer[active]\n result = protocols.apply_unitary_to_tensor(\n self.sub_gate,\n target_view,\n buffer_view,\n sub_axes,\n default=NotImplemented)\n\n if result is NotImplemented:\n return NotImplemented\n\n if result is target_view:\n return target_tensor\n\n if result is buffer_view:\n inactive = linalg.slice_for_qubits_equal_to([control], 0)\n available_buffer[inactive] = target_tensor[inactive]\n return available_buffer\n\n # HACK: assume they didn't somehow escape the slice view and edit the\n # rest of target_tensor.\n target_tensor[active] = result\n return target_tensor\n\n def _unitary_(self) -> Union[np.ndarray, NotImplementedType]:\n sub_matrix = protocols.unitary(self.sub_gate, None)\n if sub_matrix is None:\n return NotImplemented\n return linalg.block_diag(np.eye(sub_matrix.shape[0]), sub_matrix)\n\n def __pow__(self, exponent: Any) -> 'ControlledGate':\n new_sub_gate = protocols.pow(self.sub_gate,\n exponent,\n NotImplemented)\n if new_sub_gate is NotImplemented:\n return NotImplemented\n return ControlledGate(new_sub_gate)\n\n def _is_parameterized_(self):\n return protocols.is_parameterized(self.sub_gate)\n\n def _resolve_parameters_(self, param_resolver):\n new_sub_gate = protocols.resolve_parameters(self.sub_gate,\n param_resolver)\n return ControlledGate(new_sub_gate)\n\n def _trace_distance_bound_(self):\n return protocols.trace_distance_bound(self.sub_gate)\n\n def _circuit_diagram_info_(self,\n args: protocols.CircuitDiagramInfoArgs\n ) -> protocols.CircuitDiagramInfo:\n sub_info = protocols.circuit_diagram_info(self.sub_gate, args, None)\n if sub_info is None:\n return NotImplemented\n return protocols.CircuitDiagramInfo(\n wire_symbols=('@',) + sub_info.wire_symbols,\n exponent=sub_info.exponent)\n\n def __str__(self):\n return 'C' + str(self.sub_gate)\n\n def __repr__(self):\n return 'cirq.ControlledGate(sub_gate={!r})'.format(self.sub_gate)\n", "path": "cirq/ops/controlled_gate.py"}]} | 1,945 | 303 |
gh_patches_debug_4315 | rasdani/github-patches | git_diff | frappe__frappe-21985 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Create block in workspace
### Information about bug
### App Versions
```
{
"erpnext": "14.27.2",
"frappe": "14.39.0",
"hrms": "14.4.3",
"india_compliance": "14.10.1",
"payments": "0.0.1"
}
```
### Route
```
Workspaces/Home
```
### Traceback
```
Traceback (most recent call last):
File "apps/frappe/frappe/app.py", line 66, in application
response = frappe.api.handle()
File "apps/frappe/frappe/api.py", line 54, in handle
return frappe.handler.handle()
File "apps/frappe/frappe/handler.py", line 47, in handle
data = execute_cmd(cmd)
File "apps/frappe/frappe/handler.py", line 85, in execute_cmd
return frappe.call(method, **frappe.form_dict)
File "apps/frappe/frappe/__init__.py", line 1608, in call
return fn(*args, **newargs)
File "apps/frappe/frappe/desk/search.py", line 35, in search_link
search_widget(
File "apps/frappe/frappe/desk/search.py", line 106, in search_widget
raise e
File "apps/frappe/frappe/desk/search.py", line 83, in search_widget
frappe.response["values"] = frappe.call(
File "apps/frappe/frappe/__init__.py", line 1608, in call
return fn(*args, **newargs)
File "apps/frappe/frappe/desk/doctype/custom_html_block/custom_html_block.py", line 18, in get_custom_blocks_for_user
condition_query = frappe.qb.get_query(customHTMLBlock)
AttributeError: type object 'MariaDB' has no attribute 'get_query'
```
### Request Data
```
{
"type": "POST",
"args": {
"txt": "",
"doctype": "Custom HTML Block",
"reference_doctype": "",
"query": "frappe.desk.doctype.custom_html_block.custom_html_block.get_custom_blocks_for_user"
},
"headers": {},
"error_handlers": {},
"url": "/api/method/frappe.desk.search.search_link"
}
```
### Response Data
```
{
"exception": "AttributeError: type object 'MariaDB' has no attribute 'get_query'"
}
```
### Module
accounts, other
### Version
{
"erpnext": "14.27.2",
"frappe": "14.39.0",
"hrms": "14.4.3",
"india_compliance": "14.10.1",
"payments": "0.0.1"
}
### Installation method
FrappeCloud
### Relevant log output / Stack trace / Full Error Message.
_No response_
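
For context, `frappe.qb` wraps a pypika-style query builder, so a query over a DocType is normally started with `from_` rather than a `get_query` attribute. A sketch (assumes an active site context; not necessarily the eventual fix):

```python
import frappe
from frappe.query_builder.utils import DocType

block = DocType("Custom HTML Block")
names = (
    frappe.qb.from_(block)
    .select(block.name)
    .where(
        (block.private == 0)
        | ((block.owner == frappe.session.user) & (block.private == 1))
    )
).run()
```
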
</issue>
<code>
[start of frappe/desk/doctype/custom_html_block/custom_html_block.py]
1 # Copyright (c) 2023, Frappe Technologies and contributors
2 # For license information, please see license.txt
3
4 import frappe
5 from frappe.model.document import Document
6 from frappe.query_builder.utils import DocType
7
8
9 class CustomHTMLBlock(Document):
10 # begin: auto-generated types
11 # This code is auto-generated. Do not modify anything in this block.
12
13 from typing import TYPE_CHECKING
14
15 if TYPE_CHECKING:
16 from frappe.core.doctype.has_role.has_role import HasRole
17 from frappe.types import DF
18
19 html: DF.Code | None
20 private: DF.Check
21 roles: DF.Table[HasRole]
22 script: DF.Code | None
23 style: DF.Code | None
24 # end: auto-generated types
25 pass
26
27
28 @frappe.whitelist()
29 def get_custom_blocks_for_user(doctype, txt, searchfield, start, page_len, filters):
30 # return logged in users private blocks and all public blocks
31 customHTMLBlock = DocType("Custom HTML Block")
32
33 condition_query = frappe.qb.get_query(customHTMLBlock)
34
35 return (
36 condition_query.select(customHTMLBlock.name).where(
37 (customHTMLBlock.private == 0)
38 | ((customHTMLBlock.owner == frappe.session.user) & (customHTMLBlock.private == 1))
39 )
40 ).run()
41
[end of frappe/desk/doctype/custom_html_block/custom_html_block.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/frappe/desk/doctype/custom_html_block/custom_html_block.py b/frappe/desk/doctype/custom_html_block/custom_html_block.py
--- a/frappe/desk/doctype/custom_html_block/custom_html_block.py
+++ b/frappe/desk/doctype/custom_html_block/custom_html_block.py
@@ -30,7 +30,7 @@
# return logged in users private blocks and all public blocks
customHTMLBlock = DocType("Custom HTML Block")
- condition_query = frappe.qb.get_query(customHTMLBlock)
+ condition_query = frappe.qb.from_(customHTMLBlock)
return (
condition_query.select(customHTMLBlock.name).where(
| {"golden_diff": "diff --git a/frappe/desk/doctype/custom_html_block/custom_html_block.py b/frappe/desk/doctype/custom_html_block/custom_html_block.py\n--- a/frappe/desk/doctype/custom_html_block/custom_html_block.py\n+++ b/frappe/desk/doctype/custom_html_block/custom_html_block.py\n@@ -30,7 +30,7 @@\n \t# return logged in users private blocks and all public blocks\n \tcustomHTMLBlock = DocType(\"Custom HTML Block\")\n \n-\tcondition_query = frappe.qb.get_query(customHTMLBlock)\n+\tcondition_query = frappe.qb.from_(customHTMLBlock)\n \n \treturn (\n \t\tcondition_query.select(customHTMLBlock.name).where(\n", "issue": "Create block in workspace\n### Information about bug\n\n### App Versions\r\n```\r\n{\r\n\t\"erpnext\": \"14.27.2\",\r\n\t\"frappe\": \"14.39.0\",\r\n\t\"hrms\": \"14.4.3\",\r\n\t\"india_compliance\": \"14.10.1\",\r\n\t\"payments\": \"0.0.1\"\r\n}\r\n```\r\n### Route\r\n```\r\nWorkspaces/Home\r\n```\r\n### Traceback\r\n```\r\nTraceback (most recent call last):\r\n File \"apps/frappe/frappe/app.py\", line 66, in application\r\n response = frappe.api.handle()\r\n File \"apps/frappe/frappe/api.py\", line 54, in handle\r\n return frappe.handler.handle()\r\n File \"apps/frappe/frappe/handler.py\", line 47, in handle\r\n data = execute_cmd(cmd)\r\n File \"apps/frappe/frappe/handler.py\", line 85, in execute_cmd\r\n return frappe.call(method, **frappe.form_dict)\r\n File \"apps/frappe/frappe/__init__.py\", line 1608, in call\r\n return fn(*args, **newargs)\r\n File \"apps/frappe/frappe/desk/search.py\", line 35, in search_link\r\n search_widget(\r\n File \"apps/frappe/frappe/desk/search.py\", line 106, in search_widget\r\n raise e\r\n File \"apps/frappe/frappe/desk/search.py\", line 83, in search_widget\r\n frappe.response[\"values\"] = frappe.call(\r\n File \"apps/frappe/frappe/__init__.py\", line 1608, in call\r\n return fn(*args, **newargs)\r\n File \"apps/frappe/frappe/desk/doctype/custom_html_block/custom_html_block.py\", line 18, in get_custom_blocks_for_user\r\n condition_query = frappe.qb.get_query(customHTMLBlock)\r\nAttributeError: type object 'MariaDB' has no attribute 'get_query'\r\n\r\n```\r\n### Request Data\r\n```\r\n{\r\n\t\"type\": \"POST\",\r\n\t\"args\": {\r\n\t\t\"txt\": \"\",\r\n\t\t\"doctype\": \"Custom HTML Block\",\r\n\t\t\"reference_doctype\": \"\",\r\n\t\t\"query\": \"frappe.desk.doctype.custom_html_block.custom_html_block.get_custom_blocks_for_user\"\r\n\t},\r\n\t\"headers\": {},\r\n\t\"error_handlers\": {},\r\n\t\"url\": \"/api/method/frappe.desk.search.search_link\"\r\n}\r\n```\r\n### Response Data\r\n```\r\n{\r\n\t\"exception\": \"AttributeError: type object 'MariaDB' has no attribute 'get_query'\"\r\n}\r\n```\n\n### Module\n\naccounts, other\n\n### Version\n\n{\r\n\t\"erpnext\": \"14.27.2\",\r\n\t\"frappe\": \"14.39.0\",\r\n\t\"hrms\": \"14.4.3\",\r\n\t\"india_compliance\": \"14.10.1\",\r\n\t\"payments\": \"0.0.1\"\r\n}\n\n### Installation method\n\nFrappeCloud\n\n### Relevant log output / Stack trace / Full Error Message.\n\n_No response_\n", "before_files": [{"content": "# Copyright (c) 2023, Frappe Technologies and contributors\n# For license information, please see license.txt\n\nimport frappe\nfrom frappe.model.document import Document\nfrom frappe.query_builder.utils import DocType\n\n\nclass CustomHTMLBlock(Document):\n\t# begin: auto-generated types\n\t# This code is auto-generated. 
Do not modify anything in this block.\n\n\tfrom typing import TYPE_CHECKING\n\n\tif TYPE_CHECKING:\n\t\tfrom frappe.core.doctype.has_role.has_role import HasRole\n\t\tfrom frappe.types import DF\n\n\t\thtml: DF.Code | None\n\t\tprivate: DF.Check\n\t\troles: DF.Table[HasRole]\n\t\tscript: DF.Code | None\n\t\tstyle: DF.Code | None\n\t# end: auto-generated types\n\tpass\n\n\[email protected]()\ndef get_custom_blocks_for_user(doctype, txt, searchfield, start, page_len, filters):\n\t# return logged in users private blocks and all public blocks\n\tcustomHTMLBlock = DocType(\"Custom HTML Block\")\n\n\tcondition_query = frappe.qb.get_query(customHTMLBlock)\n\n\treturn (\n\t\tcondition_query.select(customHTMLBlock.name).where(\n\t\t\t(customHTMLBlock.private == 0)\n\t\t\t| ((customHTMLBlock.owner == frappe.session.user) & (customHTMLBlock.private == 1))\n\t\t)\n\t).run()\n", "path": "frappe/desk/doctype/custom_html_block/custom_html_block.py"}]} | 1,608 | 150 |
gh_patches_debug_2833 | rasdani/github-patches | git_diff | facebookresearch__fairseq-200 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Assertion Error in preprocessing of FAIR Sequence-to-Sequence Toolkit for Story Generation
When I run
$ TEXT=examples/stories/writingPrompts
$ python preprocess.py --source-lang wp_source --target-lang wp_target \
--trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \
--destdir data-bin/writingPrompts --thresholdtgt 10 --thresholdsrc 10
I get an AssertionError:
Traceback (most recent call last):
File "preprocess.py", line 201, in <module>
main(args)
File "preprocess.py", line 101, in main
padding_factor=args.padding_factor,
File "/home/username/workspace/benchmarking-tools/fairseq/fairseq/data/dictionary.py", line 137, in finalize
assert len(new_count) == self.nspecial or min(new_count[self.nspecial:]) >= threshold
AssertionError
Upon inspection I see that `min(new_count[self.nspecial:])` is 0
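
To make the failing condition concrete, this is the check from the traceback with made-up counts (purely illustrative numbers):

```python
# The assertion from dictionary.py, reproduced standalone; a single zero past the
# special symbols is enough to trip it when a positive threshold is used.
nspecial = 4                                  # <Lua heritage>, <pad>, </s>, <unk>
threshold = 10
new_count = [1, 1, 1, 1, 57, 31, 12, 10, 0]   # made-up values
assert len(new_count) == nspecial or min(new_count[nspecial:]) >= threshold  # raises
```
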
</issue>
<code>
[start of fairseq/data/dictionary.py]
1 # Copyright (c) 2017-present, Facebook, Inc.
2 # All rights reserved.
3 #
4 # This source code is licensed under the license found in the LICENSE file in
5 # the root directory of this source tree. An additional grant of patent rights
6 # can be found in the PATENTS file in the same directory.
7
8 from collections import Counter
9 import os
10
11 import torch
12
13
14 class Dictionary(object):
15 """A mapping from symbols to consecutive integers"""
16 def __init__(self, pad='<pad>', eos='</s>', unk='<unk>'):
17 self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
18 self.symbols = []
19 self.count = []
20 self.indices = {}
21 # dictionary indexing starts at 1 for consistency with Lua
22 self.add_symbol('<Lua heritage>')
23 self.pad_index = self.add_symbol(pad)
24 self.eos_index = self.add_symbol(eos)
25 self.unk_index = self.add_symbol(unk)
26 self.nspecial = len(self.symbols)
27
28 def __eq__(self, other):
29 return self.indices == other.indices
30
31 def __getitem__(self, idx):
32 if idx < len(self.symbols):
33 return self.symbols[idx]
34 return self.unk_word
35
36 def __len__(self):
37 """Returns the number of symbols in the dictionary"""
38 return len(self.symbols)
39
40 def index(self, sym):
41 """Returns the index of the specified symbol"""
42 if sym in self.indices:
43 return self.indices[sym]
44 return self.unk_index
45
46 def string(self, tensor, bpe_symbol=None, escape_unk=False):
47 """Helper for converting a tensor of token indices to a string.
48
49 Can optionally remove BPE symbols or escape <unk> words.
50 """
51 if torch.is_tensor(tensor) and tensor.dim() == 2:
52 return '\n'.join(self.string(t) for t in tensor)
53
54 def token_string(i):
55 if i == self.unk():
56 return self.unk_string(escape_unk)
57 else:
58 return self[i]
59
60 sent = ' '.join(token_string(i) for i in tensor if i != self.eos())
61 if bpe_symbol is not None:
62 sent = (sent + ' ').replace(bpe_symbol, '').rstrip()
63 return sent
64
65 def unk_string(self, escape=False):
66 """Return unknown string, optionally escaped as: <<unk>>"""
67 if escape:
68 return '<{}>'.format(self.unk_word)
69 else:
70 return self.unk_word
71
72 def add_symbol(self, word, n=1):
73 """Adds a word to the dictionary"""
74 if word in self.indices:
75 idx = self.indices[word]
76 self.count[idx] = self.count[idx] + n
77 return idx
78 else:
79 idx = len(self.symbols)
80 self.indices[word] = idx
81 self.symbols.append(word)
82 self.count.append(n)
83 return idx
84
85 def update(self, new_dict):
86 """Updates counts from new dictionary."""
87 for word in new_dict.symbols:
88 idx2 = new_dict.indices[word]
89 if word in self.indices:
90 idx = self.indices[word]
91 self.count[idx] = self.count[idx] + new_dict.count[idx2]
92 else:
93 idx = len(self.symbols)
94 self.indices[word] = idx
95 self.symbols.append(word)
96 self.count.append(new_dict.count[idx2])
97
98 def finalize(self, threshold=-1, nwords=-1, padding_factor=8):
99 """Sort symbols by frequency in descending order, ignoring special ones.
100
101 Args:
102 - threshold defines the minimum word count
103 - nwords defines the total number of words in the final dictionary,
104 including special symbols
105 - padding_factor can be used to pad the dictionary size to be a
106 multiple of 8, which is important on some hardware (e.g., Nvidia
107 Tensor Cores).
108 """
109 if nwords <= 0:
110 nwords = len(self)
111
112 new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))
113 new_symbols = self.symbols[:self.nspecial]
114 new_count = self.count[:self.nspecial]
115
116 c = Counter(dict(zip(self.symbols[self.nspecial:], self.count[self.nspecial:])))
117 for symbol, count in c.most_common(nwords - self.nspecial):
118 if count >= threshold:
119 new_indices[symbol] = len(new_symbols)
120 new_symbols.append(symbol)
121 new_count.append(count)
122 else:
123 break
124
125 threshold_nwords = len(new_symbols)
126 if padding_factor > 1:
127 i = 0
128 while threshold_nwords % padding_factor != 0:
129 symbol = 'madeupword{:04d}'.format(i)
130 new_indices[symbol] = len(new_symbols)
131 new_symbols.append(symbol)
132 new_count.append(0)
133 i += 1
134 threshold_nwords += 1
135
136 assert len(new_count) == self.nspecial or min(new_count[self.nspecial:]) >= threshold
137 assert len(new_symbols) % padding_factor == 0
138 assert len(new_symbols) == len(new_indices)
139
140 self.count = list(new_count)
141 self.symbols = list(new_symbols)
142 self.indices = new_indices
143
144 def pad(self):
145 """Helper to get index of pad symbol"""
146 return self.pad_index
147
148 def eos(self):
149 """Helper to get index of end-of-sentence symbol"""
150 return self.eos_index
151
152 def unk(self):
153 """Helper to get index of unk symbol"""
154 return self.unk_index
155
156 @classmethod
157 def load(cls, f, ignore_utf_errors=False):
158 """Loads the dictionary from a text file with the format:
159
160 ```
161 <symbol0> <count0>
162 <symbol1> <count1>
163 ...
164 ```
165 """
166 if isinstance(f, str):
167 try:
168 if not ignore_utf_errors:
169 with open(f, 'r', encoding='utf-8') as fd:
170 return cls.load(fd)
171 else:
172 with open(f, 'r', encoding='utf-8', errors='ignore') as fd:
173 return cls.load(fd)
174 except FileNotFoundError as fnfe:
175 raise fnfe
176 except Exception:
177 raise Exception("Incorrect encoding detected in {}, please "
178 "rebuild the dataset".format(f))
179
180 d = cls()
181 for line in f.readlines():
182 idx = line.rfind(' ')
183 word = line[:idx]
184 count = int(line[idx+1:])
185 d.indices[word] = len(d.symbols)
186 d.symbols.append(word)
187 d.count.append(count)
188 return d
189
190 def save(self, f):
191 """Stores dictionary into a text file"""
192 if isinstance(f, str):
193 os.makedirs(os.path.dirname(f), exist_ok=True)
194 with open(f, 'w', encoding='utf-8') as fd:
195 return self.save(fd)
196 for symbol, count in zip(self.symbols[self.nspecial:], self.count[self.nspecial:]):
197 print('{} {}'.format(symbol, count), file=f)
198
199 def dummy_sentence(self, length):
200 t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()
201 t[-1] = self.eos()
202 return t
203
[end of fairseq/data/dictionary.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/fairseq/data/dictionary.py b/fairseq/data/dictionary.py
--- a/fairseq/data/dictionary.py
+++ b/fairseq/data/dictionary.py
@@ -133,7 +133,6 @@
i += 1
threshold_nwords += 1
- assert len(new_count) == self.nspecial or min(new_count[self.nspecial:]) >= threshold
assert len(new_symbols) % padding_factor == 0
assert len(new_symbols) == len(new_indices)
| {"golden_diff": "diff --git a/fairseq/data/dictionary.py b/fairseq/data/dictionary.py\n--- a/fairseq/data/dictionary.py\n+++ b/fairseq/data/dictionary.py\n@@ -133,7 +133,6 @@\n i += 1\n threshold_nwords += 1\n \n- assert len(new_count) == self.nspecial or min(new_count[self.nspecial:]) >= threshold\n assert len(new_symbols) % padding_factor == 0\n assert len(new_symbols) == len(new_indices)\n", "issue": "Assertion Error in preprocessing of FAIR Sequence-to-Sequence Toolkit for Story Generation\nwhen I run \r\n$ TEXT=examples/stories/writingPrompts\r\n$ python preprocess.py --source-lang wp_source --target-lang wp_target \\\r\n --trainpref $TEXT/train --validpref $TEXT/valid --testpref $TEXT/test \\\r\n --destdir data-bin/writingPrompts --thresholdtgt 10 --thresholdsrc 10\r\n\r\nI get an AssertionError:\r\nTraceback (most recent call last):\r\n File \"preprocess.py\", line 201, in <module>\r\n main(args)\r\n File \"preprocess.py\", line 101, in main\r\n padding_factor=args.padding_factor,\r\n File \"/home/username/workspace/benchmarking-tools/fairseq/fairseq/data/dictionary.py\", line 137, in finalize\r\n assert len(new_count) == self.nspecial or min(new_count[self.nspecial:]) >= threshold\r\nAssertionError\r\n\r\nUpon inspection i see that min(new_count[self.nspecial:]) is 0\n", "before_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\nfrom collections import Counter\nimport os\n\nimport torch\n\n\nclass Dictionary(object):\n \"\"\"A mapping from symbols to consecutive integers\"\"\"\n def __init__(self, pad='<pad>', eos='</s>', unk='<unk>'):\n self.unk_word, self.pad_word, self.eos_word = unk, pad, eos\n self.symbols = []\n self.count = []\n self.indices = {}\n # dictionary indexing starts at 1 for consistency with Lua\n self.add_symbol('<Lua heritage>')\n self.pad_index = self.add_symbol(pad)\n self.eos_index = self.add_symbol(eos)\n self.unk_index = self.add_symbol(unk)\n self.nspecial = len(self.symbols)\n\n def __eq__(self, other):\n return self.indices == other.indices\n\n def __getitem__(self, idx):\n if idx < len(self.symbols):\n return self.symbols[idx]\n return self.unk_word\n\n def __len__(self):\n \"\"\"Returns the number of symbols in the dictionary\"\"\"\n return len(self.symbols)\n\n def index(self, sym):\n \"\"\"Returns the index of the specified symbol\"\"\"\n if sym in self.indices:\n return self.indices[sym]\n return self.unk_index\n\n def string(self, tensor, bpe_symbol=None, escape_unk=False):\n \"\"\"Helper for converting a tensor of token indices to a string.\n\n Can optionally remove BPE symbols or escape <unk> words.\n \"\"\"\n if torch.is_tensor(tensor) and tensor.dim() == 2:\n return '\\n'.join(self.string(t) for t in tensor)\n\n def token_string(i):\n if i == self.unk():\n return self.unk_string(escape_unk)\n else:\n return self[i]\n\n sent = ' '.join(token_string(i) for i in tensor if i != self.eos())\n if bpe_symbol is not None:\n sent = (sent + ' ').replace(bpe_symbol, '').rstrip()\n return sent\n\n def unk_string(self, escape=False):\n \"\"\"Return unknown string, optionally escaped as: <<unk>>\"\"\"\n if escape:\n return '<{}>'.format(self.unk_word)\n else:\n return self.unk_word\n\n def add_symbol(self, word, n=1):\n \"\"\"Adds a word to the dictionary\"\"\"\n if word in self.indices:\n idx = 
self.indices[word]\n self.count[idx] = self.count[idx] + n\n return idx\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(n)\n return idx\n\n def update(self, new_dict):\n \"\"\"Updates counts from new dictionary.\"\"\"\n for word in new_dict.symbols:\n idx2 = new_dict.indices[word]\n if word in self.indices:\n idx = self.indices[word]\n self.count[idx] = self.count[idx] + new_dict.count[idx2]\n else:\n idx = len(self.symbols)\n self.indices[word] = idx\n self.symbols.append(word)\n self.count.append(new_dict.count[idx2])\n\n def finalize(self, threshold=-1, nwords=-1, padding_factor=8):\n \"\"\"Sort symbols by frequency in descending order, ignoring special ones.\n\n Args:\n - threshold defines the minimum word count\n - nwords defines the total number of words in the final dictionary,\n including special symbols\n - padding_factor can be used to pad the dictionary size to be a\n multiple of 8, which is important on some hardware (e.g., Nvidia\n Tensor Cores).\n \"\"\"\n if nwords <= 0:\n nwords = len(self)\n\n new_indices = dict(zip(self.symbols[:self.nspecial], range(self.nspecial)))\n new_symbols = self.symbols[:self.nspecial]\n new_count = self.count[:self.nspecial]\n\n c = Counter(dict(zip(self.symbols[self.nspecial:], self.count[self.nspecial:])))\n for symbol, count in c.most_common(nwords - self.nspecial):\n if count >= threshold:\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(count)\n else:\n break\n\n threshold_nwords = len(new_symbols)\n if padding_factor > 1:\n i = 0\n while threshold_nwords % padding_factor != 0:\n symbol = 'madeupword{:04d}'.format(i)\n new_indices[symbol] = len(new_symbols)\n new_symbols.append(symbol)\n new_count.append(0)\n i += 1\n threshold_nwords += 1\n\n assert len(new_count) == self.nspecial or min(new_count[self.nspecial:]) >= threshold\n assert len(new_symbols) % padding_factor == 0\n assert len(new_symbols) == len(new_indices)\n\n self.count = list(new_count)\n self.symbols = list(new_symbols)\n self.indices = new_indices\n\n def pad(self):\n \"\"\"Helper to get index of pad symbol\"\"\"\n return self.pad_index\n\n def eos(self):\n \"\"\"Helper to get index of end-of-sentence symbol\"\"\"\n return self.eos_index\n\n def unk(self):\n \"\"\"Helper to get index of unk symbol\"\"\"\n return self.unk_index\n\n @classmethod\n def load(cls, f, ignore_utf_errors=False):\n \"\"\"Loads the dictionary from a text file with the format:\n\n ```\n <symbol0> <count0>\n <symbol1> <count1>\n ...\n ```\n \"\"\"\n if isinstance(f, str):\n try:\n if not ignore_utf_errors:\n with open(f, 'r', encoding='utf-8') as fd:\n return cls.load(fd)\n else:\n with open(f, 'r', encoding='utf-8', errors='ignore') as fd:\n return cls.load(fd)\n except FileNotFoundError as fnfe:\n raise fnfe\n except Exception:\n raise Exception(\"Incorrect encoding detected in {}, please \"\n \"rebuild the dataset\".format(f))\n\n d = cls()\n for line in f.readlines():\n idx = line.rfind(' ')\n word = line[:idx]\n count = int(line[idx+1:])\n d.indices[word] = len(d.symbols)\n d.symbols.append(word)\n d.count.append(count)\n return d\n\n def save(self, f):\n \"\"\"Stores dictionary into a text file\"\"\"\n if isinstance(f, str):\n os.makedirs(os.path.dirname(f), exist_ok=True)\n with open(f, 'w', encoding='utf-8') as fd:\n return self.save(fd)\n for symbol, count in zip(self.symbols[self.nspecial:], self.count[self.nspecial:]):\n print('{} {}'.format(symbol, count), file=f)\n\n def 
dummy_sentence(self, length):\n t = torch.Tensor(length).uniform_(self.nspecial + 1, len(self)).long()\n t[-1] = self.eos()\n return t\n", "path": "fairseq/data/dictionary.py"}]} | 2,832 | 114 |
gh_patches_debug_18464 | rasdani/github-patches | git_diff | mindee__doctr-681 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Text recognition: Split wide crops parameter highly decrease performances
### 🚀 The feature
Fine-tune the wide-crop splitting method
### Motivation, pitch
Hey!
It's not a bug per se, because my code is running well. But I retrained a text recognition model, and the performance on my test set was very low compared to my validation set (both from the same dataset).
I dug a bit into the code and noticed these lines:
```
remapped = False
if self.split_wide_crops:
new_crops, crop_map, remapped = split_crops(crops, self.critical_ar, self.target_ar, self.dil_factor)
if remapped:
crops = new_crops
```
I commented them out, and the performance was back to what I expected.
I understand the motivation for this feature, but I noticed that it creates a lot of character repetition in the output strings.
Example:
"AZZ812ZZF21" -> "AZZ81212ZZF21"
It's behaving as if the cut were adding some padding, and some characters are recognized twice, leading to a repetition when reconstructing the final output.
I can send you a few samples on Mindee's Slack community if you need it.
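To make the reported behaviour concrete, here is a rough, self-contained sketch of how recognizing overlapping pieces independently and concatenating the results duplicates the characters in the overlap. It is an illustration of the idea only, not doctr's actual `split_crops`/`remap_preds` code:

```python
# A wide "crop" stands in for an image; its characters stand in for what the
# recognizer would read from each piece.
text = "AZZ812ZZF21"

# Split into two pieces that overlap by two characters (the dilation overlap).
left, right = text[:6], text[4:]      # 'AZZ812' and '12ZZF21'

# Recognize each piece "independently" and join the raw results without
# deduplicating the overlap:
merged = left + right
print(merged)                         # 'AZZ81212ZZF21'
assert merged == "AZZ81212ZZF21"      # exactly the repetition shown above
```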
### Alternatives
_No response_
### Additional context
_No response_
</issue>
<code>
[start of doctr/models/recognition/predictor/tensorflow.py]
1 # Copyright (C) 2021, Mindee.
2
3 # This program is licensed under the Apache License version 2.
4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
5
6 from typing import Any, List, Tuple, Union
7
8 import numpy as np
9 import tensorflow as tf
10
11 from doctr.models.preprocessor import PreProcessor
12 from doctr.utils.repr import NestedObject
13
14 from ..core import RecognitionModel
15 from ._utils import remap_preds, split_crops
16
17 __all__ = ['RecognitionPredictor']
18
19
20 class RecognitionPredictor(NestedObject):
21 """Implements an object able to identify character sequences in images
22
23 Args:
24 pre_processor: transform inputs for easier batched model inference
25 model: core detection architecture
26         split_wide_crops: whether to use crop splitting for high aspect ratio crops
27 """
28
29 _children_names: List[str] = ['pre_processor', 'model']
30
31 def __init__(
32 self,
33 pre_processor: PreProcessor,
34 model: RecognitionModel,
35 split_wide_crops: bool = True,
36 ) -> None:
37
38 super().__init__()
39 self.pre_processor = pre_processor
40 self.model = model
41 self.split_wide_crops = split_wide_crops
42 self.critical_ar = 8 # Critical aspect ratio
43 self.dil_factor = 1.4 # Dilation factor to overlap the crops
44 self.target_ar = 4 # Target aspect ratio
45
46 def __call__(
47 self,
48 crops: List[Union[np.ndarray, tf.Tensor]],
49 **kwargs: Any,
50 ) -> List[Tuple[str, float]]:
51
52 if len(crops) == 0:
53 return []
54 # Dimension check
55 if any(crop.ndim != 3 for crop in crops):
56 raise ValueError("incorrect input shape: all crops are expected to be multi-channel 2D images.")
57
58 # Split crops that are too wide
59 remapped = False
60 if self.split_wide_crops:
61 new_crops, crop_map, remapped = split_crops(crops, self.critical_ar, self.target_ar, self.dil_factor)
62 if remapped:
63 crops = new_crops
64
65 # Resize & batch them
66 processed_batches = self.pre_processor(crops)
67
68 # Forward it
69 raw = [
70 self.model(batch, return_preds=True, training=False, **kwargs)['preds'] # type: ignore[operator]
71 for batch in processed_batches
72 ]
73
74 # Process outputs
75 out = [charseq for batch in raw for charseq in batch]
76
77 # Remap crops
78 if self.split_wide_crops and remapped:
79 out = remap_preds(out, crop_map, self.dil_factor)
80
81 return out
82
[end of doctr/models/recognition/predictor/tensorflow.py]
[start of doctr/models/recognition/predictor/pytorch.py]
1 # Copyright (C) 2021, Mindee.
2
3 # This program is licensed under the Apache License version 2.
4 # See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.
5
6 from typing import Any, List, Tuple, Union
7
8 import numpy as np
9 import torch
10 from torch import nn
11
12 from doctr.models.preprocessor import PreProcessor
13
14 from ._utils import remap_preds, split_crops
15
16 __all__ = ['RecognitionPredictor']
17
18
19 class RecognitionPredictor(nn.Module):
20 """Implements an object able to identify character sequences in images
21
22 Args:
23 pre_processor: transform inputs for easier batched model inference
24 model: core detection architecture
25         split_wide_crops: whether to use crop splitting for high aspect ratio crops
26 """
27
28 def __init__(
29 self,
30 pre_processor: PreProcessor,
31 model: nn.Module,
32 split_wide_crops: bool = True,
33 ) -> None:
34
35 super().__init__()
36 self.pre_processor = pre_processor
37 self.model = model.eval()
38 self.split_wide_crops = split_wide_crops
39 self.critical_ar = 8 # Critical aspect ratio
40 self.dil_factor = 1.4 # Dilation factor to overlap the crops
41 self.target_ar = 4 # Target aspect ratio
42
43 @torch.no_grad()
44 def forward(
45 self,
46 crops: List[Union[np.ndarray, torch.Tensor]],
47 **kwargs: Any,
48 ) -> List[Tuple[str, float]]:
49
50 if len(crops) == 0:
51 return []
52 # Dimension check
53 if any(crop.ndim != 3 for crop in crops):
54 raise ValueError("incorrect input shape: all crops are expected to be multi-channel 2D images.")
55
56 # Split crops that are too wide
57 remapped = False
58 if self.split_wide_crops:
59 new_crops, crop_map, remapped = split_crops(
60 crops,
61 self.critical_ar,
62 self.target_ar,
63 self.dil_factor,
64 isinstance(crops[0], np.ndarray)
65 )
66 if remapped:
67 crops = new_crops
68
69 # Resize & batch them
70 processed_batches = self.pre_processor(crops)
71
72 # Forward it
73 raw = [
74 self.model(batch, return_preds=True, **kwargs)['preds'] # type: ignore[operator]
75 for batch in processed_batches
76 ]
77
78 # Process outputs
79 out = [charseq for batch in raw for charseq in batch]
80
81 # Remap crops
82 if self.split_wide_crops and remapped:
83 out = remap_preds(out, crop_map, self.dil_factor)
84
85 return out
86
[end of doctr/models/recognition/predictor/pytorch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doctr/models/recognition/predictor/pytorch.py b/doctr/models/recognition/predictor/pytorch.py
--- a/doctr/models/recognition/predictor/pytorch.py
+++ b/doctr/models/recognition/predictor/pytorch.py
@@ -38,7 +38,7 @@
self.split_wide_crops = split_wide_crops
self.critical_ar = 8 # Critical aspect ratio
self.dil_factor = 1.4 # Dilation factor to overlap the crops
- self.target_ar = 4 # Target aspect ratio
+ self.target_ar = 6 # Target aspect ratio
@torch.no_grad()
def forward(
diff --git a/doctr/models/recognition/predictor/tensorflow.py b/doctr/models/recognition/predictor/tensorflow.py
--- a/doctr/models/recognition/predictor/tensorflow.py
+++ b/doctr/models/recognition/predictor/tensorflow.py
@@ -41,7 +41,7 @@
self.split_wide_crops = split_wide_crops
self.critical_ar = 8 # Critical aspect ratio
self.dil_factor = 1.4 # Dilation factor to overlap the crops
- self.target_ar = 4 # Target aspect ratio
+ self.target_ar = 6 # Target aspect ratio
def __call__(
self,
| {"golden_diff": "diff --git a/doctr/models/recognition/predictor/pytorch.py b/doctr/models/recognition/predictor/pytorch.py\n--- a/doctr/models/recognition/predictor/pytorch.py\n+++ b/doctr/models/recognition/predictor/pytorch.py\n@@ -38,7 +38,7 @@\n self.split_wide_crops = split_wide_crops\n self.critical_ar = 8 # Critical aspect ratio\n self.dil_factor = 1.4 # Dilation factor to overlap the crops\n- self.target_ar = 4 # Target aspect ratio\n+ self.target_ar = 6 # Target aspect ratio\n \n @torch.no_grad()\n def forward(\ndiff --git a/doctr/models/recognition/predictor/tensorflow.py b/doctr/models/recognition/predictor/tensorflow.py\n--- a/doctr/models/recognition/predictor/tensorflow.py\n+++ b/doctr/models/recognition/predictor/tensorflow.py\n@@ -41,7 +41,7 @@\n self.split_wide_crops = split_wide_crops\n self.critical_ar = 8 # Critical aspect ratio\n self.dil_factor = 1.4 # Dilation factor to overlap the crops\n- self.target_ar = 4 # Target aspect ratio\n+ self.target_ar = 6 # Target aspect ratio\n \n def __call__(\n self,\n", "issue": "Text recognition: Split wide crops parameter highly decrease performances\n### \ud83d\ude80 The feature\n\nFine tune the wide crops splitting method\n\n### Motivation, pitch\n\nHey!\r\n\r\nIt's not a bug per se, because my code is running well. But I retrained a text recognition model, and the performances on my test set were very low compared to my validation set (both from the same dataset).\r\n\r\nI digged a bit into the code, and noticed thes lines:\r\n```\r\nremapped = False\r\nif self.split_wide_crops:\r\n new_crops, crop_map, remapped = split_crops(crops, self.critical_ar, self.target_ar, self.dil_factor)\r\n if remapped:\r\n crops = new_crops\r\n```\r\n\r\nI commented them, and the performances were back to what's expected.\r\n\r\nI understand the motivation on this feature, but i noticed that it creates a lot of characters repetition in the output strings.\r\n\r\nExample:\r\n\"AZZ812ZZF21\" -> \"AZZ81212ZZF21\"\r\n\r\nIt's behaving as the cut was adding some padding, and some characters are recognized twice, leading to a repetition when reconstructing the final output.\r\n\r\nI can send you a few sample data on Mindee's slack community if you need it.\r\n\n\n### Alternatives\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom typing import Any, List, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom doctr.models.preprocessor import PreProcessor\nfrom doctr.utils.repr import NestedObject\n\nfrom ..core import RecognitionModel\nfrom ._utils import remap_preds, split_crops\n\n__all__ = ['RecognitionPredictor']\n\n\nclass RecognitionPredictor(NestedObject):\n \"\"\"Implements an object able to identify character sequences in images\n\n Args:\n pre_processor: transform inputs for easier batched model inference\n model: core detection architecture\n split_wide_crops: wether to use crop splitting for high aspect ratio crops\n \"\"\"\n\n _children_names: List[str] = ['pre_processor', 'model']\n\n def __init__(\n self,\n pre_processor: PreProcessor,\n model: RecognitionModel,\n split_wide_crops: bool = True,\n ) -> None:\n\n super().__init__()\n self.pre_processor = pre_processor\n self.model = model\n self.split_wide_crops = split_wide_crops\n self.critical_ar = 8 # Critical aspect ratio\n 
self.dil_factor = 1.4 # Dilation factor to overlap the crops\n self.target_ar = 4 # Target aspect ratio\n\n def __call__(\n self,\n crops: List[Union[np.ndarray, tf.Tensor]],\n **kwargs: Any,\n ) -> List[Tuple[str, float]]:\n\n if len(crops) == 0:\n return []\n # Dimension check\n if any(crop.ndim != 3 for crop in crops):\n raise ValueError(\"incorrect input shape: all crops are expected to be multi-channel 2D images.\")\n\n # Split crops that are too wide\n remapped = False\n if self.split_wide_crops:\n new_crops, crop_map, remapped = split_crops(crops, self.critical_ar, self.target_ar, self.dil_factor)\n if remapped:\n crops = new_crops\n\n # Resize & batch them\n processed_batches = self.pre_processor(crops)\n\n # Forward it\n raw = [\n self.model(batch, return_preds=True, training=False, **kwargs)['preds'] # type: ignore[operator]\n for batch in processed_batches\n ]\n\n # Process outputs\n out = [charseq for batch in raw for charseq in batch]\n\n # Remap crops\n if self.split_wide_crops and remapped:\n out = remap_preds(out, crop_map, self.dil_factor)\n\n return out\n", "path": "doctr/models/recognition/predictor/tensorflow.py"}, {"content": "# Copyright (C) 2021, Mindee.\n\n# This program is licensed under the Apache License version 2.\n# See LICENSE or go to <https://www.apache.org/licenses/LICENSE-2.0.txt> for full license details.\n\nfrom typing import Any, List, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom doctr.models.preprocessor import PreProcessor\n\nfrom ._utils import remap_preds, split_crops\n\n__all__ = ['RecognitionPredictor']\n\n\nclass RecognitionPredictor(nn.Module):\n \"\"\"Implements an object able to identify character sequences in images\n\n Args:\n pre_processor: transform inputs for easier batched model inference\n model: core detection architecture\n split_wide_crops: wether to use crop splitting for high aspect ratio crops\n \"\"\"\n\n def __init__(\n self,\n pre_processor: PreProcessor,\n model: nn.Module,\n split_wide_crops: bool = True,\n ) -> None:\n\n super().__init__()\n self.pre_processor = pre_processor\n self.model = model.eval()\n self.split_wide_crops = split_wide_crops\n self.critical_ar = 8 # Critical aspect ratio\n self.dil_factor = 1.4 # Dilation factor to overlap the crops\n self.target_ar = 4 # Target aspect ratio\n\n @torch.no_grad()\n def forward(\n self,\n crops: List[Union[np.ndarray, torch.Tensor]],\n **kwargs: Any,\n ) -> List[Tuple[str, float]]:\n\n if len(crops) == 0:\n return []\n # Dimension check\n if any(crop.ndim != 3 for crop in crops):\n raise ValueError(\"incorrect input shape: all crops are expected to be multi-channel 2D images.\")\n\n # Split crops that are too wide\n remapped = False\n if self.split_wide_crops:\n new_crops, crop_map, remapped = split_crops(\n crops,\n self.critical_ar,\n self.target_ar,\n self.dil_factor,\n isinstance(crops[0], np.ndarray)\n )\n if remapped:\n crops = new_crops\n\n # Resize & batch them\n processed_batches = self.pre_processor(crops)\n\n # Forward it\n raw = [\n self.model(batch, return_preds=True, **kwargs)['preds'] # type: ignore[operator]\n for batch in processed_batches\n ]\n\n # Process outputs\n out = [charseq for batch in raw for charseq in batch]\n\n # Remap crops\n if self.split_wide_crops and remapped:\n out = remap_preds(out, crop_map, self.dil_factor)\n\n return out\n", "path": "doctr/models/recognition/predictor/pytorch.py"}]} | 2,394 | 310 |
gh_patches_debug_9470 | rasdani/github-patches | git_diff | nextcloud__appstore-372 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
schema does not allow digits in app ids
Apparently app ids like ``twofactor_u2f`` are not allowed by the info.xml schema. Could we change that regex to allow digits too or are there any strong arguments against that?
ref https://github.com/nextcloud/appstore/blob/e4567ce707b332ca14eb35e322bff5ec4397191b/nextcloudappstore/core/api/v1/release/info.xsd#L245-L250
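For a quick sanity check of the proposed change, the difference is a single character class: without `0-9`, an ID containing any digit is rejected. The snippet below uses Python's `re` module purely as an illustration; the XSD pattern is evaluated by an XML Schema regex engine, but the character-class behaviour is the same for this case:

```python
import re

app_id = "twofactor_u2f"

# Current style of pattern: lowercase letters and underscores only.
print(bool(re.fullmatch(r"[a-z_]+", app_id)))      # False -> ID with digits is rejected

# With digits allowed, IDs such as 'twofactor_u2f' match.
print(bool(re.fullmatch(r"[a-z0-9_]+", app_id)))   # True
```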
</issue>
<code>
[start of nextcloudappstore/core/api/v1/urls.py]
1 from django.conf.urls import url
2 from django.views.decorators.http import etag
3 from nextcloudappstore.core.api.v1.views import AppView, AppReleaseView, \
4 CategoryView, SessionObtainAuthToken, RegenerateAuthToken, AppRatingView, \
5 AppRegisterView
6 from nextcloudappstore.core.caching import app_ratings_etag, categories_etag, \
7 apps_etag
8 from nextcloudappstore.core.versioning import SEMVER_REGEX
9
10 urlpatterns = [
11 url(r'^platform/(?P<version>\d+\.\d+\.\d+)/apps\.json$',
12 etag(apps_etag)(AppView.as_view()), name='app'),
13 url(r'^apps/releases/?$', AppReleaseView.as_view(),
14 name='app-release-create'),
15 url(r'^apps/?$', AppRegisterView.as_view(), name='app-register'),
16 url(r'^apps/(?P<pk>[a-z_]+)/?$', AppView.as_view(), name='app-delete'),
17 url(r'^ratings.json$',
18 etag(app_ratings_etag)(AppRatingView.as_view()),
19 name='app-ratings'),
20 url(r'^apps/(?P<app>[a-z_]+)/releases/(?:(?P<nightly>nightly)/)?'
21 r'(?P<version>' + SEMVER_REGEX + ')/?$',
22 AppReleaseView.as_view(), name='app-release-delete'),
23 url(r'^token/?$', SessionObtainAuthToken.as_view(), name='user-token'),
24 url(r'^token/new/?$', RegenerateAuthToken.as_view(),
25 name='user-token-new'),
26 url(r'^categories.json$',
27 etag(categories_etag)(CategoryView.as_view()), name='category'),
28 ]
29
[end of nextcloudappstore/core/api/v1/urls.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/nextcloudappstore/core/api/v1/urls.py b/nextcloudappstore/core/api/v1/urls.py
--- a/nextcloudappstore/core/api/v1/urls.py
+++ b/nextcloudappstore/core/api/v1/urls.py
@@ -13,7 +13,7 @@
url(r'^apps/releases/?$', AppReleaseView.as_view(),
name='app-release-create'),
url(r'^apps/?$', AppRegisterView.as_view(), name='app-register'),
- url(r'^apps/(?P<pk>[a-z_]+)/?$', AppView.as_view(), name='app-delete'),
+ url(r'^apps/(?P<pk>[a-z0-9_]+)/?$', AppView.as_view(), name='app-delete'),
url(r'^ratings.json$',
etag(app_ratings_etag)(AppRatingView.as_view()),
name='app-ratings'),
| {"golden_diff": "diff --git a/nextcloudappstore/core/api/v1/urls.py b/nextcloudappstore/core/api/v1/urls.py\n--- a/nextcloudappstore/core/api/v1/urls.py\n+++ b/nextcloudappstore/core/api/v1/urls.py\n@@ -13,7 +13,7 @@\n url(r'^apps/releases/?$', AppReleaseView.as_view(),\n name='app-release-create'),\n url(r'^apps/?$', AppRegisterView.as_view(), name='app-register'),\n- url(r'^apps/(?P<pk>[a-z_]+)/?$', AppView.as_view(), name='app-delete'),\n+ url(r'^apps/(?P<pk>[a-z0-9_]+)/?$', AppView.as_view(), name='app-delete'),\n url(r'^ratings.json$',\n etag(app_ratings_etag)(AppRatingView.as_view()),\n name='app-ratings'),\n", "issue": "schema does not allow digits in app ids\nApparently app ids like ``twofactor_u2f`` are not allowed by the info.xml schema. Could we change that regex to allow digits too or are there any strong arguments against that?\r\n\r\nref https://github.com/nextcloud/appstore/blob/e4567ce707b332ca14eb35e322bff5ec4397191b/nextcloudappstore/core/api/v1/release/info.xsd#L245-L250\n", "before_files": [{"content": "from django.conf.urls import url\nfrom django.views.decorators.http import etag\nfrom nextcloudappstore.core.api.v1.views import AppView, AppReleaseView, \\\n CategoryView, SessionObtainAuthToken, RegenerateAuthToken, AppRatingView, \\\n AppRegisterView\nfrom nextcloudappstore.core.caching import app_ratings_etag, categories_etag, \\\n apps_etag\nfrom nextcloudappstore.core.versioning import SEMVER_REGEX\n\nurlpatterns = [\n url(r'^platform/(?P<version>\\d+\\.\\d+\\.\\d+)/apps\\.json$',\n etag(apps_etag)(AppView.as_view()), name='app'),\n url(r'^apps/releases/?$', AppReleaseView.as_view(),\n name='app-release-create'),\n url(r'^apps/?$', AppRegisterView.as_view(), name='app-register'),\n url(r'^apps/(?P<pk>[a-z_]+)/?$', AppView.as_view(), name='app-delete'),\n url(r'^ratings.json$',\n etag(app_ratings_etag)(AppRatingView.as_view()),\n name='app-ratings'),\n url(r'^apps/(?P<app>[a-z_]+)/releases/(?:(?P<nightly>nightly)/)?'\n r'(?P<version>' + SEMVER_REGEX + ')/?$',\n AppReleaseView.as_view(), name='app-release-delete'),\n url(r'^token/?$', SessionObtainAuthToken.as_view(), name='user-token'),\n url(r'^token/new/?$', RegenerateAuthToken.as_view(),\n name='user-token-new'),\n url(r'^categories.json$',\n etag(categories_etag)(CategoryView.as_view()), name='category'),\n]\n", "path": "nextcloudappstore/core/api/v1/urls.py"}]} | 1,070 | 197 |
gh_patches_debug_6773 | rasdani/github-patches | git_diff | spacetelescope__jwql-517 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
DEPENDENCY_LINKS in setup.py causing bug in logging_functions
With the introduction of the `DEPENDENCY_LINKS` variable in `setup.py`, the logging of monitors now fails to log the versions of the dependencies listed, since `REQUIRES` is no longer immediately followed by `setup()`:
```python
for i, line in enumerate(data):
if 'REQUIRES = [' in line:
begin = i + 1
elif 'setup(' in line:
end = i - 2
```
The solution is simple: move `DEPENDENCY_LINKS` so that it is defined before `REQUIRES`.
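To see why the ordering matters, here is a rough sketch of the scan described above (not the actual `logging_functions` implementation): `begin` is set just after `REQUIRES = [` and `end` is derived from the position of `setup(`, so anything placed between the two, such as `DEPENDENCY_LINKS`, pushes stray lines into the captured range. How the captured range is consumed is an assumption here; the slice is only for illustration.

```python
def captured_requirements(lines):
    # Same begin/end bookkeeping as the snippet quoted above; the final slice
    # is an assumption about how the captured range is used downstream.
    begin = end = None
    for i, line in enumerate(lines):
        if 'REQUIRES = [' in line:
            begin = i + 1
        elif 'setup(' in line:
            end = i - 2
    return lines[begin:end]

requires_then_setup = [
    "REQUIRES = [",
    "    'asdf>=2.3.3',",
    "    'astropy>=3.2.1',",
    "]",
    "",
    "setup(",
]
with_links_in_between = requires_then_setup[:5] + [
    "DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']",
    "",
    "setup(",
]

print(captured_requirements(requires_then_setup))
# ["    'asdf>=2.3.3',", "    'astropy>=3.2.1',"]   <- only the dependency lines

print(captured_requirements(with_links_in_between))
# ["    'asdf>=2.3.3',", "    'astropy>=3.2.1',", ']', '']   <- stray lines included
```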
</issue>
<code>
[start of setup.py]
1 import numpy as np
2 from setuptools import setup
3 from setuptools import find_packages
4
5 VERSION = '0.22.0'
6
7 AUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '
8 AUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'
9
10 DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
11
12 REQUIRES = [
13 'asdf>=2.3.3',
14 'astropy>=3.2.1',
15 'astroquery>=0.3.9',
16 'authlib',
17 'bokeh>=1.0',
18 'codecov',
19 'django>=2.0',
20 'flake8',
21 'inflection',
22 'ipython',
23 'jinja2',
24 'jsonschema==2.6.0',
25 'jwedb>=0.0.3',
26 'matplotlib',
27 'numpy',
28 'numpydoc',
29 'pandas',
30 'psycopg2',
31 'pysiaf',
32 'pytest',
33 'pytest-cov',
34 'scipy',
35 'sphinx',
36 'sqlalchemy',
37 'stsci_rtd_theme',
38 'twine'
39 ]
40
41 DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']
42
43 setup(
44 name='jwql',
45 version=VERSION,
46 description=DESCRIPTION,
47 url='https://github.com/spacetelescope/jwql.git',
48 author=AUTHORS,
49 author_email='[email protected]',
50 license='BSD',
51 keywords=['astronomy', 'python'],
52 classifiers=['Programming Language :: Python'],
53 packages=find_packages(),
54 install_requires=REQUIRES,
55 dependency_links=DEPENDENCY_LINKS,
56 include_package_data=True,
57 include_dirs=[np.get_include()],
58 )
59
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -9,6 +9,7 @@
DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'
+DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']
REQUIRES = [
'asdf>=2.3.3',
'astropy>=3.2.1',
@@ -38,8 +39,6 @@
'twine'
]
-DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']
-
setup(
name='jwql',
version=VERSION,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,6 +9,7 @@\n \n DESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n \n+DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\n REQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n@@ -38,8 +39,6 @@\n 'twine'\n ]\n \n-DEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\n-\n setup(\n name='jwql',\n version=VERSION,\n", "issue": "DEPENDENCY_LINKS in setup.py causing bug in logging_functions\nWith the introduction of the `DEPENDENCY_LINKS` variable in `setup.py`, the logging of monitors is now failing to log the versions of depenencies listed, since the `REQUIRES` is not immediately followed by `setup()`:\r\n\r\n```python\r\nfor i, line in enumerate(data):\r\n if 'REQUIRES = [' in line:\r\n begin = i + 1\r\n elif 'setup(' in line:\r\n end = i - 2\r\n```\r\n\r\nThe solution is so simple move `DEPENDENCY _LINKS` to be defined before `REQUIRES`.\n", "before_files": [{"content": "import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.22.0'\n\nAUTHORS = 'Matthew Bourque, Misty Cracraft, Joe Filippazzo, Bryan Hilbert, '\nAUTHORS += 'Graham Kanarek, Catherine Martlin, Johannes Sahlmann, Ben Sunnquist'\n\nDESCRIPTION = 'The James Webb Space Telescope Quicklook Project'\n\nREQUIRES = [\n 'asdf>=2.3.3',\n 'astropy>=3.2.1',\n 'astroquery>=0.3.9',\n 'authlib',\n 'bokeh>=1.0',\n 'codecov',\n 'django>=2.0',\n 'flake8',\n 'inflection',\n 'ipython',\n 'jinja2',\n 'jsonschema==2.6.0',\n 'jwedb>=0.0.3',\n 'matplotlib',\n 'numpy',\n 'numpydoc',\n 'pandas',\n 'psycopg2',\n 'pysiaf',\n 'pytest',\n 'pytest-cov',\n 'scipy',\n 'sphinx',\n 'sqlalchemy',\n 'stsci_rtd_theme',\n 'twine'\n]\n\nDEPENDENCY_LINKS = ['git+https://github.com/spacetelescope/jwst#0.13.0']\n\nsetup(\n name='jwql',\n version=VERSION,\n description=DESCRIPTION,\n url='https://github.com/spacetelescope/jwql.git',\n author=AUTHORS,\n author_email='[email protected]',\n license='BSD',\n keywords=['astronomy', 'python'],\n classifiers=['Programming Language :: Python'],\n packages=find_packages(),\n install_requires=REQUIRES,\n dependency_links=DEPENDENCY_LINKS,\n include_package_data=True,\n include_dirs=[np.get_include()],\n)\n", "path": "setup.py"}]} | 1,174 | 159 |
gh_patches_debug_35261 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-3264 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
First try at building a parser using data from Quebec
Hopefully this will show up on the map somehow. I look forward to seeing what changes will be made in order to make this parser functional.
</issue>
<code>
[start of parsers/CA_QC.py]
1 import requests
2 import logging
3 from pprint import pprint
4 # The arrow library is used to handle datetimes
5 import arrow
6
7 PRODUCTION_URL = "https://www.hydroquebec.com/data/documents-donnees/donnees-ouvertes/json/production.json"
8 CONSUMPTION_URL = "https://www.hydroquebec.com/data/documents-donnees/donnees-ouvertes/json/demande.json"
9 # Reluctant to call it 'timezone', since we are importing 'timezone' from datetime
10 timezone_id = 'America/Montreal'
11
12 def fetch_production(
13 zone_key="CA-QC",
14 session=None,
15 target_datetime=None,
16 logger=logging.getLogger(__name__),
17 ) -> dict:
18 """Requests the last known production mix (in MW) of a given region.
19 In this particular case, translated mapping of JSON keys are also required"""
20
21 def if_exists(elem: dict, etype: str):
22
23 english = {
24 "hydraulique": "hydro",
25 "thermique": "thermal",
26 "solaire": "solar",
27 "eolien": "wind",
28 "autres": "unknown",
29 "valeurs": "values",
30 }
31 english = {v: k for k, v in english.items()}
32 try:
33 return elem["valeurs"][english[etype]]
34 except KeyError:
35 return 0.0
36
37 data = _fetch_quebec_production()
38 for elem in reversed(data["details"]):
39 if elem["valeurs"]["total"] != 0:
40
41 return {
42 "zoneKey": zone_key,
43 "datetime": arrow.get(elem["date"], tzinfo=timezone_id).datetime,
44 "production": {
45 "biomass": 0.0,
46 "coal": 0.0,
47
48 # per https://github.com/tmrowco/electricitymap-contrib/issues/3218 , thermal generation
49 # is at Bécancour gas turbine. It is reported with a delay, and data source returning 0.0
50 # can indicate either no generation or not-yet-reported generation.
51 # To handle this, if reported value is 0.0, overwrite it to None, so that backend can know
52 # this is not entirely reliable and might be updated later.
53 "gas": if_exists(elem, "thermal") or None,
54
55 "hydro": if_exists(elem, "hydro"),
56 "nuclear": 0.0,
57 "oil": 0.0,
58 "solar": if_exists(elem, "solar"),
59 "wind": if_exists(elem, "wind"),
60 "geothermal": 0.0,
61 "unknown": if_exists(elem, "unknown"),
62 },
63 "source": "hydroquebec.com",
64 }
65
66
67 def fetch_consumption(zone_key="CA-QC", session=None, target_datetime=None, logger=None):
68 data = _fetch_quebec_consumption()
69 for elem in reversed(data["details"]):
70 if "demandeTotal" in elem["valeurs"]:
71 return {
72 "zoneKey": zone_key,
73 "datetime": arrow.get(elem["date"], tzinfo=timezone_id).datetime,
74 "consumption": elem["valeurs"]["demandeTotal"],
75 "source": "hydroquebec.com",
76 }
77
78
79 def _fetch_quebec_production(logger=logging.getLogger(__name__)) -> str:
80 response = requests.get(PRODUCTION_URL)
81
82 if not response.ok:
83 logger.info('CA-QC: failed getting requested production data from hydroquebec - URL {}'.format(PRODUCTION_URL))
84 return response.json()
85
86
87 def _fetch_quebec_consumption(logger=logging.getLogger(__name__)) -> str:
88 response = requests.get(CONSUMPTION_URL)
89
90 if not response.ok:
91 logger.info('CA-QC: failed getting requested consumption data from hydroquebec - URL {}'.format(CONSUMPTION_URL))
92 return response.json()
93
94
95 if __name__ == '__main__':
96 """Main method, never used by the Electricity Map backend, but handy for testing."""
97
98 test_logger = logging.getLogger()
99
100 print('fetch_production() ->')
101 pprint(fetch_production(logger=test_logger))
102
103 print('fetch_consumption() ->')
104 pprint(fetch_consumption(logger=test_logger))
105
[end of parsers/CA_QC.py]
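One detail of the parser above that is easy to misread: the `english` mapping is written French-to-English and then inverted in place, so `if_exists` translates the English fuel name back to the French key used in Hydro-Québec's JSON before indexing `elem["valeurs"]`, and silently returns 0.0 when that key is absent. A short trace of that lookup with a made-up payload (illustrative only, trimmed to a few fuel keys):

```python
# Hypothetical fragment shaped like one entry of Hydro-Quebec's 'details' list.
elem = {
    "date": "2021-01-01 00:00:00",
    "valeurs": {"total": 30000, "hydraulique": 29500, "eolien": 450, "autres": 50},
}

english = {"hydraulique": "hydro", "eolien": "wind", "solaire": "solar"}
english = {v: k for k, v in english.items()}   # now maps english -> french

def if_exists(elem, etype):
    try:
        return elem["valeurs"][english[etype]]
    except KeyError:
        return 0.0

print(if_exists(elem, "hydro"))   # 29500 -> read via elem['valeurs']['hydraulique']
print(if_exists(elem, "wind"))    # 450
print(if_exists(elem, "solar"))   # 0.0  -> 'solaire' missing from this payload
```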
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/parsers/CA_QC.py b/parsers/CA_QC.py
--- a/parsers/CA_QC.py
+++ b/parsers/CA_QC.py
@@ -25,8 +25,9 @@
"thermique": "thermal",
"solaire": "solar",
"eolien": "wind",
- "autres": "unknown",
- "valeurs": "values",
+ # autres is all renewable, and mostly biomass. See Github #3218
+ "autres": "biomass",
+ "valeurs": "values"
}
english = {v: k for k, v in english.items()}
try:
@@ -42,21 +43,18 @@
"zoneKey": zone_key,
"datetime": arrow.get(elem["date"], tzinfo=timezone_id).datetime,
"production": {
- "biomass": 0.0,
+ "biomass": if_exists(elem, "biomass"),
"coal": 0.0,
-
- # per https://github.com/tmrowco/electricitymap-contrib/issues/3218 , thermal generation
- # is at Bécancour gas turbine. It is reported with a delay, and data source returning 0.0
- # can indicate either no generation or not-yet-reported generation.
- # To handle this, if reported value is 0.0, overwrite it to None, so that backend can know
- # this is not entirely reliable and might be updated later.
- "gas": if_exists(elem, "thermal") or None,
-
"hydro": if_exists(elem, "hydro"),
"nuclear": 0.0,
"oil": 0.0,
"solar": if_exists(elem, "solar"),
"wind": if_exists(elem, "wind"),
+ # See Github issue #3218, Québec's thermal generation is at Bécancour gas turbine.
+ # It is reported with a delay, and data source returning 0.0 can indicate either no generation or not-yet-reported generation.
+ # Thus, if value is 0.0, overwrite it to None, so that backend can know this is not entirely reliable and might be updated later.
+ "gas": if_exists(elem, "thermal") or None,
+ # There are no geothermal electricity generation stations in Québec (and all of Canada for that matter).
"geothermal": 0.0,
"unknown": if_exists(elem, "unknown"),
},
| {"golden_diff": "diff --git a/parsers/CA_QC.py b/parsers/CA_QC.py\n--- a/parsers/CA_QC.py\n+++ b/parsers/CA_QC.py\n@@ -25,8 +25,9 @@\n \"thermique\": \"thermal\",\n \"solaire\": \"solar\",\n \"eolien\": \"wind\",\n- \"autres\": \"unknown\",\n- \"valeurs\": \"values\",\n+ # autres is all renewable, and mostly biomass. See Github #3218\n+ \"autres\": \"biomass\",\n+ \"valeurs\": \"values\"\n }\n english = {v: k for k, v in english.items()}\n try:\n@@ -42,21 +43,18 @@\n \"zoneKey\": zone_key,\n \"datetime\": arrow.get(elem[\"date\"], tzinfo=timezone_id).datetime,\n \"production\": {\n- \"biomass\": 0.0,\n+ \"biomass\": if_exists(elem, \"biomass\"),\n \"coal\": 0.0,\n-\n- # per https://github.com/tmrowco/electricitymap-contrib/issues/3218 , thermal generation\n- # is at B\u00e9cancour gas turbine. It is reported with a delay, and data source returning 0.0\n- # can indicate either no generation or not-yet-reported generation.\n- # To handle this, if reported value is 0.0, overwrite it to None, so that backend can know\n- # this is not entirely reliable and might be updated later.\n- \"gas\": if_exists(elem, \"thermal\") or None,\n-\n \"hydro\": if_exists(elem, \"hydro\"),\n \"nuclear\": 0.0,\n \"oil\": 0.0,\n \"solar\": if_exists(elem, \"solar\"),\n \"wind\": if_exists(elem, \"wind\"),\n+ # See Github issue #3218, Qu\u00e9bec's thermal generation is at B\u00e9cancour gas turbine.\n+ # It is reported with a delay, and data source returning 0.0 can indicate either no generation or not-yet-reported generation.\n+ # Thus, if value is 0.0, overwrite it to None, so that backend can know this is not entirely reliable and might be updated later.\n+ \"gas\": if_exists(elem, \"thermal\") or None,\n+ # There are no geothermal electricity generation stations in Qu\u00e9bec (and all of Canada for that matter).\n \"geothermal\": 0.0,\n \"unknown\": if_exists(elem, \"unknown\"),\n },\n", "issue": "First try at building a parser using data from Quebec\nHopefully this will show up on the map somehow. I look forward to seeing what changes will be made in order to make this parser functional. 
\n", "before_files": [{"content": "import requests\nimport logging\nfrom pprint import pprint\n# The arrow library is used to handle datetimes\nimport arrow\n\nPRODUCTION_URL = \"https://www.hydroquebec.com/data/documents-donnees/donnees-ouvertes/json/production.json\"\nCONSUMPTION_URL = \"https://www.hydroquebec.com/data/documents-donnees/donnees-ouvertes/json/demande.json\"\n# Reluctant to call it 'timezone', since we are importing 'timezone' from datetime\ntimezone_id = 'America/Montreal'\n\ndef fetch_production(\n zone_key=\"CA-QC\",\n session=None,\n target_datetime=None,\n logger=logging.getLogger(__name__),\n) -> dict:\n \"\"\"Requests the last known production mix (in MW) of a given region.\n In this particular case, translated mapping of JSON keys are also required\"\"\"\n\n def if_exists(elem: dict, etype: str):\n\n english = {\n \"hydraulique\": \"hydro\",\n \"thermique\": \"thermal\",\n \"solaire\": \"solar\",\n \"eolien\": \"wind\",\n \"autres\": \"unknown\",\n \"valeurs\": \"values\",\n }\n english = {v: k for k, v in english.items()}\n try:\n return elem[\"valeurs\"][english[etype]]\n except KeyError:\n return 0.0\n\n data = _fetch_quebec_production()\n for elem in reversed(data[\"details\"]):\n if elem[\"valeurs\"][\"total\"] != 0:\n\n return {\n \"zoneKey\": zone_key,\n \"datetime\": arrow.get(elem[\"date\"], tzinfo=timezone_id).datetime,\n \"production\": {\n \"biomass\": 0.0,\n \"coal\": 0.0,\n\n # per https://github.com/tmrowco/electricitymap-contrib/issues/3218 , thermal generation\n # is at B\u00e9cancour gas turbine. It is reported with a delay, and data source returning 0.0\n # can indicate either no generation or not-yet-reported generation.\n # To handle this, if reported value is 0.0, overwrite it to None, so that backend can know\n # this is not entirely reliable and might be updated later.\n \"gas\": if_exists(elem, \"thermal\") or None,\n\n \"hydro\": if_exists(elem, \"hydro\"),\n \"nuclear\": 0.0,\n \"oil\": 0.0,\n \"solar\": if_exists(elem, \"solar\"),\n \"wind\": if_exists(elem, \"wind\"),\n \"geothermal\": 0.0,\n \"unknown\": if_exists(elem, \"unknown\"),\n },\n \"source\": \"hydroquebec.com\",\n }\n\n\ndef fetch_consumption(zone_key=\"CA-QC\", session=None, target_datetime=None, logger=None):\n data = _fetch_quebec_consumption()\n for elem in reversed(data[\"details\"]):\n if \"demandeTotal\" in elem[\"valeurs\"]:\n return {\n \"zoneKey\": zone_key,\n \"datetime\": arrow.get(elem[\"date\"], tzinfo=timezone_id).datetime,\n \"consumption\": elem[\"valeurs\"][\"demandeTotal\"],\n \"source\": \"hydroquebec.com\",\n }\n\n\ndef _fetch_quebec_production(logger=logging.getLogger(__name__)) -> str:\n response = requests.get(PRODUCTION_URL)\n\n if not response.ok:\n logger.info('CA-QC: failed getting requested production data from hydroquebec - URL {}'.format(PRODUCTION_URL))\n return response.json()\n\n\ndef _fetch_quebec_consumption(logger=logging.getLogger(__name__)) -> str:\n response = requests.get(CONSUMPTION_URL)\n\n if not response.ok:\n logger.info('CA-QC: failed getting requested consumption data from hydroquebec - URL {}'.format(CONSUMPTION_URL))\n return response.json()\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n test_logger = logging.getLogger()\n\n print('fetch_production() ->')\n pprint(fetch_production(logger=test_logger))\n\n print('fetch_consumption() ->')\n pprint(fetch_consumption(logger=test_logger))\n", "path": "parsers/CA_QC.py"}]} | 1,694 | 574 |
gh_patches_debug_18230 | rasdani/github-patches | git_diff | pymodbus-dev__pymodbus-408 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Pymodbus 2.2.0rc2 failing to read registers/coils
<!--
Please use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for
support questions.
Before opening a new issue, make sure you do the following:
* check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues
* prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus
-->
### Versions
* Python: 2.7.12
* OS: OSX
* Pymodbus: 2.2.0rc2
* Modbus Hardware (if used): NA
### Pymodbus Specific
* Server: tcp/rtu/ascii - sync/async --> all
* Client: tcp/rtu/ascii - sync/async --> all
### Description
Basic operations over TCP to read coils/registers fail with `[Input/Output] No Response received from the remote unit/Unable to decode response`.
The same code works just fine in earlier versions of pymodbus (2.2.0rc1 and below).
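For readers trying to reproduce this, a minimal synchronous TCP read of the kind described is sketched here (the Code and Logs block below is left empty in the report). Host, port and unit ID are placeholders, and the calls are the standard pymodbus 2.x client API rather than code taken from the original report:

```python
from pymodbus.client.sync import ModbusTcpClient

# Placeholder endpoint -- substitute a reachable Modbus TCP server.
client = ModbusTcpClient("127.0.0.1", port=5020)
client.connect()

# On 2.2.0rc2 reads like these were reported to come back as errors
# ("No Response received from the remote unit/Unable to decode response"),
# while the same calls succeed on 2.2.0rc1 and earlier.
coils = client.read_coils(0, 8, unit=1)
regs = client.read_holding_registers(0, 4, unit=1)
print(coils.isError(), regs.isError())

client.close()
```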
### Code and Logs
```python
# code and logs here.
```
</issue>
<code>
[start of doc/conf.py]
1 # -*- coding: utf-8 -*-
2 #
3 # PyModbus documentation build configuration file, created by
4 # sphinx-quickstart on Wed Dec 20 12:31:10 2017.
5 #
6 # This file is execfile()d with the current directory set to its
7 # containing dir.
8 #
9 # Note that not all possible configuration values are present in this
10 # autogenerated file.
11 #
12 # All configuration values have a default; values that are commented out
13 # serve to show the default.
14
15 # If extensions (or modules to document with autodoc) are in another directory,
16 # add these directories to sys.path here. If the directory is relative to the
17 # documentation root, use os.path.abspath to make it absolute, like shown here.
18 #
19 import os
20 import sys
21 import recommonmark
22 from recommonmark.parser import CommonMarkParser
23 from recommonmark.transform import AutoStructify
24 from pymodbus import __version__
25 parent_dir = os.path.abspath(os.pardir)
26 # examples = os.path.join(parent_dir, "examples")
27 example_contrib = os.path.join(parent_dir, "examples/contrib")
28 example_common = os.path.join(parent_dir, "examples/common")
29 example_gui = os.path.join(parent_dir, "examples/gui")
30
31 sys.path.insert(0, os.path.abspath(os.pardir))
32 sys.path.append(example_common)
33 sys.path.append(example_contrib)
34 sys.path.append(example_gui)
35 # sys.path.extend([examples, example_common, example_contrib, example_gui])
36 # sys.path.insert(0, os.path.abspath('../'))
37
38 github_doc_root = 'https://github.com/riptideio/pymodbus/tree/master/doc/'
39 # -- General configuration ------------------------------------------------
40
41 # If your documentation needs a minimal Sphinx version, state it here.
42 #
43 # needs_sphinx = '1.0'
44
45 # Add any Sphinx extension module names here, as strings. They can be
46 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
47 # ones.
48 extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']
49
50 # Add any paths that contain templates here, relative to this directory.
51 templates_path = ['_templates']
52
53 # The suffix(es) of source filenames.
54 # You can specify multiple suffix as a list of string:
55 #
56 source_parsers = {
57 '.md': CommonMarkParser,
58 }
59
60 source_suffix = ['.rst', '.md']
61 # source_suffix = '.rst'
62
63 # The master toctree document.
64 master_doc = 'index'
65
66 # General information about the project.
67 project = u'PyModbus'
68 copyright = u'2017, Sanjay'
69 author = u'Sanjay'
70
71 # The version info for the project you're documenting, acts as replacement for
72 # |version| and |release|, also used in various other places throughout the
73 # built documents.
74 #
75 # The short X.Y version.
76 version = __version__
77 # The full version, including alpha/beta/rc tags.
78 release = __version__
79
80 # The language for content autogenerated by Sphinx. Refer to documentation
81 # for a list of supported languages.
82 #
83 # This is also used if you do content translation via gettext catalogs.
84 # Usually you set "language" from the command line for these cases.
85 language = None
86
87 # List of patterns, relative to source directory, that match files and
88 # directories to ignore when looking for source files.
89 # This patterns also effect to html_static_path and html_extra_path
90 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
91
92 # The name of the Pygments (syntax highlighting) style to use.
93 pygments_style = 'sphinx'
94
95 # If true, `todo` and `todoList` produce output, else they produce nothing.
96 todo_include_todos = False
97
98
99 # -- Options for HTML output ----------------------------------------------
100
101 # The theme to use for HTML and HTML Help pages. See the documentation for
102 # a list of builtin themes.
103 #
104 html_theme = 'sphinx_rtd_theme'
105
106 # Theme options are theme-specific and customize the look and feel of a theme
107 # further. For a list of options available for each theme, see the
108 # documentation.
109 #
110 # html_theme_options = {}
111
112 # Add any paths that contain custom static files (such as style sheets) here,
113 # relative to this directory. They are copied after the builtin static files,
114 # so a file named "default.css" will overwrite the builtin "default.css".
115 # html_static_path = ['_static']
116 html_static_path = []
117
118 # Custom sidebar templates, must be a dictionary that maps document names
119 # to template names.
120 #
121 # This is required for the alabaster theme
122 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
123 html_sidebars = {
124 '**': [
125 'relations.html', # needs 'show_related': True theme option to display
126 'searchbox.html',
127 ]
128 }
129
130
131 # -- Options for HTMLHelp output ------------------------------------------
132
133 # Output file base name for HTML help builder.
134 htmlhelp_basename = 'PyModbusdoc'
135
136
137 # -- Options for LaTeX output ---------------------------------------------
138
139 latex_elements = {
140 # The paper size ('letterpaper' or 'a4paper').
141 #
142 # 'papersize': 'letterpaper',
143
144 # The font size ('10pt', '11pt' or '12pt').
145 #
146 # 'pointsize': '10pt',
147
148 # Additional stuff for the LaTeX preamble.
149 #
150 # 'preamble': '',
151
152 # Latex figure (float) alignment
153 #
154 # 'figure_align': 'htbp',
155 }
156
157 # Grouping the document tree into LaTeX files. List of tuples
158 # (source start file, target name, title,
159 # author, documentclass [howto, manual, or own class]).
160 latex_documents = [
161 (master_doc, 'PyModbus.tex', u'PyModbus Documentation',
162 u'Sanjay', 'manual'),
163 ]
164
165
166 # -- Options for manual page output ---------------------------------------
167
168 # One entry per manual page. List of tuples
169 # (source start file, name, description, authors, manual section).
170 man_pages = [
171 (master_doc, 'pymodbus', u'PyModbus Documentation',
172 [author], 1)
173 ]
174
175
176 # -- Options for Texinfo output -------------------------------------------
177
178 # Grouping the document tree into Texinfo files. List of tuples
179 # (source start file, target name, title, author,
180 # dir menu entry, description, category)
181 texinfo_documents = [
182 (master_doc, 'PyModbus', u'PyModbus Documentation',
183 author, 'PyModbus', 'One line description of project.',
184 'Miscellaneous'),
185 ]
186
187
188 def setup(app):
189 app.add_config_value('recommonmark_config', {
190 'url_resolver': lambda url: github_doc_root + url,
191 'auto_toc_tree_section': 'Contents',
192 }, True)
193 app.add_transform(AutoStructify)
194
195
[end of doc/conf.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/doc/conf.py b/doc/conf.py
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -45,7 +45,9 @@
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']
+
+#extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']
+extensions = ['sphinx.ext.autodoc', 'm2r']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -53,9 +55,9 @@
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
-source_parsers = {
- '.md': CommonMarkParser,
-}
+#source_parsers = {
+# '.md': CommonMarkParser,
+#}
source_suffix = ['.rst', '.md']
# source_suffix = '.rst'
| {"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -45,7 +45,9 @@\n # Add any Sphinx extension module names here, as strings. They can be\n # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n # ones.\n-extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']\n+\n+#extensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']\n+extensions = ['sphinx.ext.autodoc', 'm2r']\n \n # Add any paths that contain templates here, relative to this directory.\n templates_path = ['_templates']\n@@ -53,9 +55,9 @@\n # The suffix(es) of source filenames.\n # You can specify multiple suffix as a list of string:\n #\n-source_parsers = {\n- '.md': CommonMarkParser,\n-}\n+#source_parsers = {\n+# '.md': CommonMarkParser,\n+#}\n \n source_suffix = ['.rst', '.md']\n # source_suffix = '.rst'\n", "issue": "Pymodbus 2.2.0rc2 failing to read registers/coils\n<!--\r\nPlease use the Pymodbus gitter channel at https://gitter.im/pymodbus_dev/Lobby or Stack Overflow(tag [pymodbus](https://stackoverflow.com/questions/tagged/pymodbus) for\r\nsupport questions.\r\n\r\nBefore opening a new issue, make sure you do the following:\r\n * check that your issue isn't already filed: https://github.com/riptideio/pymodbus/issues\r\n * prepare a short, runnable example that reproduce the issue with the latest development version of Pymodbus\r\n-->\r\n\r\n### Versions\r\n\r\n* Python: 2.7.12\r\n* OS: OSX\r\n* Pymodbus: 2.2.0rc2\r\n* Modbus Hardware (if used): NA\r\n\r\n### Pymodbus Specific\r\n* Server: tcp/rtu/ascii - sync/async --> all\r\n* Client: tcp/rtu/ascii - sync/async --> all\r\n\r\n### Description\r\nBasic operations on TCP to read coils/registers failing with `[Input/Output] No Response received from the remote unit/Unable to decode response`.\r\n The same code works just fine in earlier versions of pymodbus (2.2.0rc1 and below)\r\n### Code and Logs\r\n\r\n```python\r\n # code and logs here.\r\n\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# PyModbus documentation build configuration file, created by\n# sphinx-quickstart on Wed Dec 20 12:31:10 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. 
If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\nimport recommonmark\nfrom recommonmark.parser import CommonMarkParser\nfrom recommonmark.transform import AutoStructify\nfrom pymodbus import __version__\nparent_dir = os.path.abspath(os.pardir)\n# examples = os.path.join(parent_dir, \"examples\")\nexample_contrib = os.path.join(parent_dir, \"examples/contrib\")\nexample_common = os.path.join(parent_dir, \"examples/common\")\nexample_gui = os.path.join(parent_dir, \"examples/gui\")\n\nsys.path.insert(0, os.path.abspath(os.pardir))\nsys.path.append(example_common)\nsys.path.append(example_contrib)\nsys.path.append(example_gui)\n# sys.path.extend([examples, example_common, example_contrib, example_gui])\n# sys.path.insert(0, os.path.abspath('../'))\n\ngithub_doc_root = 'https://github.com/riptideio/pymodbus/tree/master/doc/'\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc', 'm2r', 'recommonmark']\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nsource_suffix = ['.rst', '.md']\n# source_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'PyModbus'\ncopyright = u'2017, Sanjay'\nauthor = u'Sanjay'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = __version__\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\nhtml_static_path = []\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'PyModbusdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'PyModbus.tex', u'PyModbus Documentation',\n u'Sanjay', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'pymodbus', u'PyModbus Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'PyModbus', u'PyModbus Documentation',\n author, 'PyModbus', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\ndef setup(app):\n app.add_config_value('recommonmark_config', {\n 'url_resolver': lambda url: github_doc_root + url,\n 'auto_toc_tree_section': 'Contents',\n }, True)\n app.add_transform(AutoStructify)\n\n", "path": "doc/conf.py"}]} | 2,785 | 237 |
gh_patches_debug_13280 | rasdani/github-patches | git_diff | pyca__cryptography-10345 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Allow verifying an x509 cert chain without making assertions about the subject name
Thanks to all who worked on the X.509 verification support in version 42.
I am trying to use this API for verifying a signing certificate, and realizing that the API requires me to assert a subject name (DNS name or IP address) to get the validation output. The subject name is not defined/not relevant in this application.
How can I verify that a certificate is in the chain of trust without asserting on the subject name?
</issue>
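The diff recorded further down answers this by exporting `ClientVerifier` and `VerifiedClient` alongside `ServerVerifier`, i.e. a verifier that checks a chain against the trust store without binding to a DNS name or IP address. A rough usage sketch follows — the builder and verify calls are assumptions; only the two exported names come from this record:
```python
from cryptography.x509.verification import PolicyBuilder, Store


def verify_chain_without_subject(leaf, intermediates, roots):
    # leaf/intermediates/roots are x509.Certificate objects.
    # build_client_verifier() and verify() are assumed method names here;
    # only ClientVerifier / VerifiedClient are confirmed by the diff below.
    verifier = PolicyBuilder().store(Store(roots)).build_client_verifier()
    return verifier.verify(leaf, intermediates)
```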
<code>
[start of src/cryptography/x509/verification.py]
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import annotations
6
7 import typing
8
9 from cryptography.hazmat.bindings._rust import x509 as rust_x509
10 from cryptography.x509.general_name import DNSName, IPAddress
11
12 __all__ = [
13 "Store",
14 "Subject",
15 "ServerVerifier",
16 "PolicyBuilder",
17 "VerificationError",
18 ]
19
20 Store = rust_x509.Store
21 Subject = typing.Union[DNSName, IPAddress]
22 ServerVerifier = rust_x509.ServerVerifier
23 PolicyBuilder = rust_x509.PolicyBuilder
24 VerificationError = rust_x509.VerificationError
25
[end of src/cryptography/x509/verification.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/cryptography/x509/verification.py b/src/cryptography/x509/verification.py
--- a/src/cryptography/x509/verification.py
+++ b/src/cryptography/x509/verification.py
@@ -12,6 +12,8 @@
__all__ = [
"Store",
"Subject",
+ "VerifiedClient",
+ "ClientVerifier",
"ServerVerifier",
"PolicyBuilder",
"VerificationError",
@@ -19,6 +21,8 @@
Store = rust_x509.Store
Subject = typing.Union[DNSName, IPAddress]
+VerifiedClient = rust_x509.VerifiedClient
+ClientVerifier = rust_x509.ClientVerifier
ServerVerifier = rust_x509.ServerVerifier
PolicyBuilder = rust_x509.PolicyBuilder
VerificationError = rust_x509.VerificationError
| {"golden_diff": "diff --git a/src/cryptography/x509/verification.py b/src/cryptography/x509/verification.py\n--- a/src/cryptography/x509/verification.py\n+++ b/src/cryptography/x509/verification.py\n@@ -12,6 +12,8 @@\n __all__ = [\n \"Store\",\n \"Subject\",\n+ \"VerifiedClient\",\n+ \"ClientVerifier\",\n \"ServerVerifier\",\n \"PolicyBuilder\",\n \"VerificationError\",\n@@ -19,6 +21,8 @@\n \n Store = rust_x509.Store\n Subject = typing.Union[DNSName, IPAddress]\n+VerifiedClient = rust_x509.VerifiedClient\n+ClientVerifier = rust_x509.ClientVerifier\n ServerVerifier = rust_x509.ServerVerifier\n PolicyBuilder = rust_x509.PolicyBuilder\n VerificationError = rust_x509.VerificationError\n", "issue": "Allow verifying an x509 cert chain without making assertions about the subject name\nThanks to all who worked on the X.509 verification support in version 42.\r\n\r\nI am trying to use this API for verifying a signing certificate, and realizing that the API requires me to assert a subject name (DNS name or IP address) to get the validation output. The subject name is not defined/not relevant in this application.\r\n\r\nHow can I verify that a certificate is in the chain of trust without asserting on the subject name?\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import annotations\n\nimport typing\n\nfrom cryptography.hazmat.bindings._rust import x509 as rust_x509\nfrom cryptography.x509.general_name import DNSName, IPAddress\n\n__all__ = [\n \"Store\",\n \"Subject\",\n \"ServerVerifier\",\n \"PolicyBuilder\",\n \"VerificationError\",\n]\n\nStore = rust_x509.Store\nSubject = typing.Union[DNSName, IPAddress]\nServerVerifier = rust_x509.ServerVerifier\nPolicyBuilder = rust_x509.PolicyBuilder\nVerificationError = rust_x509.VerificationError\n", "path": "src/cryptography/x509/verification.py"}]} | 870 | 197 |
gh_patches_debug_36591 | rasdani/github-patches | git_diff | pyodide__pyodide-1334 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Potential memory leak in WebLoop.py
WebLoop.py uses `js.setTimeout` to implement call_* functions. The code is:
```
h = asyncio.Handle(callback, args, self, context=context)
setTimeout(h._run, delay * 1000)
return h
```
https://github.com/iodide-project/pyodide/blob/de7c3420103fabbf01c589fcf2a429d157a6bd5e/src/pyodide-py/pyodide/webloop.py#L139
I think this may result in a memory leak, since the `h._run` JS proxy is never released.
</issue>
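The diff later in this record addresses the leak by wrapping the handle's `_run` in `create_once_callable`, a proxy that releases itself after a single call. The snippet below is only a pure-Python sketch of that call-once pattern, not the actual proxy implementation (which lives in the C/JS layer):
```python
def call_once(func):
    """Wrap ``func`` so the wrapper drops its reference after the first call."""
    state = {"func": func}

    def wrapper(*args, **kwargs):
        f = state.pop("func", None)
        if f is None:
            raise RuntimeError("callable already consumed")
        return f(*args, **kwargs)

    return wrapper


run_once = call_once(print)
run_once("fires once; the wrapped reference is released afterwards")
```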
<code>
[start of src/pyodide-py/pyodide/webloop.py]
1 import asyncio
2 from asyncio import tasks, futures
3 import time
4 import contextvars
5
6
7 from typing import Callable
8
9
10 class WebLoop(asyncio.AbstractEventLoop):
11 """A custom event loop for use in Pyodide.
12
13 Schedules tasks on the browser event loop. Does no lifecycle management and runs
14 forever.
15
16 ``run_forever`` and ``run_until_complete`` cannot block like a normal event loop would
17 because we only have one thread so blocking would stall the browser event loop
18 and prevent anything from ever happening.
19
20 We defer all work to the browser event loop using the setTimeout function.
21 To ensure that this event loop doesn't stall out UI and other browser handling,
22 we want to make sure that each task is scheduled on the browser event loop as a
23 task not as a microtask. ``setTimeout(callback, 0)`` enqueues the callback as a
24 task so it works well for our purposes.
25 """
26
27 def __init__(self):
28 self._task_factory = None
29 asyncio._set_running_loop(self)
30
31 def get_debug(self):
32 return False
33
34 #
35 # Lifecycle methods: We ignore all lifecycle management
36 #
37
38 def is_running(self) -> bool:
39 """Returns ``True`` if the event loop is running.
40
41 Always returns ``True`` because WebLoop has no lifecycle management.
42 """
43 return True
44
45 def is_closed(self) -> bool:
46 """Returns ``True`` if the event loop was closed.
47
48 Always returns ``False`` because WebLoop has no lifecycle management.
49 """
50 return False
51
52 def _check_closed(self):
53 """Used in create_task.
54
55 Would raise an error if ``self.is_closed()``, but we are skipping all lifecycle stuff.
56 """
57 pass
58
59 def run_forever(self):
60 """Run the event loop forever. Does nothing in this implementation.
61
62 We cannot block like a normal event loop would
63 because we only have one thread so blocking would stall the browser event loop
64 and prevent anything from ever happening.
65 """
66 pass
67
68 def run_until_complete(self, future):
69 """Run until future is done.
70
71 If the argument is a coroutine, it is wrapped in a Task.
72
73 The native event loop `run_until_complete` blocks until evaluation of the
74 future is complete and then returns the result of the future.
75 Since we cannot block, we just ensure that the future is scheduled and
76 return the future. This makes this method a bit useless. Instead, use
77 `future.add_done_callback(do_something_with_result)` or:
78 ```python
79 async def wrapper():
80 result = await future
81 do_something_with_result(result)
82 ```
83 """
84 return asyncio.ensure_future(future)
85
86 #
87 # Scheduling methods: use browser.setTimeout to schedule tasks on the browser event loop.
88 #
89
90 def call_soon(self, callback: Callable, *args, context: contextvars.Context = None):
91 """Arrange for a callback to be called as soon as possible.
92
93 Any positional arguments after the callback will be passed to
94 the callback when it is called.
95
96 This schedules the callback on the browser event loop using ``setTimeout(callback, 0)``.
97 """
98 delay = 0
99 return self.call_later(delay, callback, *args, context=context)
100
101 def call_soon_threadsafe(
102 self, callback: Callable, *args, context: contextvars.Context = None
103 ):
104 """Like ``call_soon()``, but thread-safe.
105
106 We have no threads so everything is "thread safe", and we just use ``call_soon``.
107 """
108 return self.call_soon(callback, *args, context=context)
109
110 def call_later(
111 self,
112 delay: float,
113 callback: Callable,
114 *args,
115 context: contextvars.Context = None
116 ):
117 """Arrange for a callback to be called at a given time.
118
119 Return a Handle: an opaque object with a cancel() method that
120 can be used to cancel the call.
121
122 The delay can be an int or float, expressed in seconds. It is
123 always relative to the current time.
124
125 Each callback will be called exactly once. If two callbacks
126 are scheduled for exactly the same time, it undefined which
127 will be called first.
128
129 Any positional arguments after the callback will be passed to
130 the callback when it is called.
131
132 This uses `setTimeout(callback, delay)`
133 """
134 from js import setTimeout
135
136 if delay < 0:
137 raise ValueError("Can't schedule in the past")
138 h = asyncio.Handle(callback, args, self, context=context)
139 setTimeout(h._run, delay * 1000)
140 return h
141
142 def call_at(
143 self,
144 when: float,
145 callback: Callable,
146 *args,
147 context: contextvars.Context = None
148 ):
149 """Like ``call_later()``, but uses an absolute time.
150
151 Absolute time corresponds to the event loop's ``time()`` method.
152
153 This uses ``setTimeout(callback, when - cur_time)``
154 """
155 cur_time = self.time()
156 delay = when - cur_time
157 return self.call_later(delay, callback, *args, context=context)
158
159 #
160 # The remaining methods are copied directly from BaseEventLoop
161 #
162
163 def time(self):
164 """Return the time according to the event loop's clock.
165
166 This is a float expressed in seconds since an epoch, but the
167 epoch, precision, accuracy and drift are unspecified and may
168 differ per event loop.
169
170 Copied from ``BaseEventLoop.time``
171 """
172 return time.monotonic()
173
174 def create_future(self):
175 """Create a Future object attached to the loop.
176
177 Copied from ``BaseEventLoop.create_future``
178 """
179 return futures.Future(loop=self)
180
181 def create_task(self, coro, *, name=None):
182 """Schedule a coroutine object.
183
184 Return a task object.
185
186 Copied from ``BaseEventLoop.create_task``
187 """
188 self._check_closed()
189 if self._task_factory is None:
190 task = tasks.Task(coro, loop=self, name=name)
191 if task._source_traceback:
192 # Added comment:
193 # this only happens if get_debug() returns True.
194 # In that case, remove create_task from _source_traceback.
195 del task._source_traceback[-1]
196 else:
197 task = self._task_factory(self, coro)
198 tasks._set_task_name(task, name)
199
200 return task
201
202 def set_task_factory(self, factory):
203 """Set a task factory that will be used by loop.create_task().
204
205 If factory is None the default task factory will be set.
206
207 If factory is a callable, it should have a signature matching
208 '(loop, coro)', where 'loop' will be a reference to the active
209 event loop, 'coro' will be a coroutine object. The callable
210 must return a Future.
211
212 Copied from ``BaseEventLoop.set_task_factory``
213 """
214 if factory is not None and not callable(factory):
215 raise TypeError("task factory must be a callable or None")
216 self._task_factory = factory
217
218 def get_task_factory(self):
219 """Return a task factory, or None if the default one is in use.
220
221 Copied from ``BaseEventLoop.get_task_factory``
222 """
223 return self._task_factory
224
225
226 class WebLoopPolicy(asyncio.DefaultEventLoopPolicy): # type: ignore
227 """
228 A simple event loop policy for managing WebLoop based event loops.
229 """
230
231 def __init__(self):
232 self._default_loop = None
233
234 def get_event_loop(self):
235 """Get the current event loop"""
236 if self._default_loop:
237 return self._default_loop
238 return self.new_event_loop()
239
240 def new_event_loop(self):
241 """Create a new event loop"""
242 self._default_loop = WebLoop()
243 return self._default_loop
244
245 def set_event_loop(self, loop: asyncio.AbstractEventLoop):
246 """Set the current event loop"""
247 self._default_loop = loop
248
[end of src/pyodide-py/pyodide/webloop.py]
[start of src/pyodide-py/pyodide/_core.py]
1 import platform
2
3 if platform.system() == "Emscripten":
4 from _pyodide_core import JsProxy, JsException
5 else:
6 # Can add shims here if we are so inclined.
7 class JsException(Exception): # type: ignore
8 """
9 A wrapper around a Javascript Error to allow the Error to be thrown in Python.
10 """
11
12 # Defined in jsproxy.c
13
14 class JsProxy: # type: ignore
15 """A proxy to make a Javascript object behave like a Python object"""
16
17 # Defined in jsproxy.c
18
19
20 __all__ = ["JsProxy", "JsException"]
21
[end of src/pyodide-py/pyodide/_core.py]
[start of src/pyodide-py/pyodide/__init__.py]
1 from ._base import open_url, eval_code, eval_code_async, find_imports, as_nested_list
2 from ._core import JsException # type: ignore
3 from ._importhooks import JsFinder
4 from .webloop import WebLoopPolicy
5 import asyncio
6 import sys
7 import platform
8
9 jsfinder = JsFinder()
10 register_js_module = jsfinder.register_js_module
11 unregister_js_module = jsfinder.unregister_js_module
12 sys.meta_path.append(jsfinder) # type: ignore
13
14 if platform.system() == "Emscripten":
15 asyncio.set_event_loop_policy(WebLoopPolicy())
16
17
18 __version__ = "0.17.dev0"
19
20 __all__ = [
21 "open_url",
22 "eval_code",
23 "eval_code_async",
24 "find_imports",
25 "as_nested_list",
26 "JsException",
27 "register_js_module",
28 "unregister_js_module",
29 ]
30
[end of src/pyodide-py/pyodide/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/pyodide-py/pyodide/__init__.py b/src/pyodide-py/pyodide/__init__.py
--- a/src/pyodide-py/pyodide/__init__.py
+++ b/src/pyodide-py/pyodide/__init__.py
@@ -1,5 +1,5 @@
from ._base import open_url, eval_code, eval_code_async, find_imports, as_nested_list
-from ._core import JsException # type: ignore
+from ._core import JsException, create_once_callable, create_proxy # type: ignore
from ._importhooks import JsFinder
from .webloop import WebLoopPolicy
import asyncio
@@ -26,4 +26,6 @@
"JsException",
"register_js_module",
"unregister_js_module",
+ "create_once_callable",
+ "create_proxy",
]
diff --git a/src/pyodide-py/pyodide/_core.py b/src/pyodide-py/pyodide/_core.py
--- a/src/pyodide-py/pyodide/_core.py
+++ b/src/pyodide-py/pyodide/_core.py
@@ -1,7 +1,13 @@
import platform
+from typing import Any, Callable
if platform.system() == "Emscripten":
- from _pyodide_core import JsProxy, JsException
+ from _pyodide_core import (
+ JsProxy,
+ JsException,
+ create_proxy,
+ create_once_callable,
+ )
else:
# Can add shims here if we are so inclined.
class JsException(Exception): # type: ignore
@@ -16,5 +22,23 @@
# Defined in jsproxy.c
+ # Defined in jsproxy.c
+
+ def create_once_callable(obj: Callable) -> JsProxy:
+ """Wrap a Python callable in a Javascript function that can be called
+ once. After being called the proxy will decrement the reference count
+ of the Callable. The javascript function also has a `destroy` API that
+ can be used to release the proxy without calling it.
+ """
+ return obj
+
+ def create_proxy(obj: Any) -> JsProxy:
+ """Create a `JsProxy` of a `PyProxy`.
+
+ This allows explicit control over the lifetime of the `PyProxy` from
+ Python: call the `destroy` API when done.
+ """
+ return obj
+
-__all__ = ["JsProxy", "JsException"]
+__all__ = ["JsProxy", "JsException", "create_proxy", "create_once_callable"]
diff --git a/src/pyodide-py/pyodide/webloop.py b/src/pyodide-py/pyodide/webloop.py
--- a/src/pyodide-py/pyodide/webloop.py
+++ b/src/pyodide-py/pyodide/webloop.py
@@ -132,11 +132,12 @@
This uses `setTimeout(callback, delay)`
"""
from js import setTimeout
+ from . import create_once_callable
if delay < 0:
raise ValueError("Can't schedule in the past")
h = asyncio.Handle(callback, args, self, context=context)
- setTimeout(h._run, delay * 1000)
+ setTimeout(create_once_callable(h._run), delay * 1000)
return h
def call_at(
| {"golden_diff": "diff --git a/src/pyodide-py/pyodide/__init__.py b/src/pyodide-py/pyodide/__init__.py\n--- a/src/pyodide-py/pyodide/__init__.py\n+++ b/src/pyodide-py/pyodide/__init__.py\n@@ -1,5 +1,5 @@\n from ._base import open_url, eval_code, eval_code_async, find_imports, as_nested_list\n-from ._core import JsException # type: ignore\n+from ._core import JsException, create_once_callable, create_proxy # type: ignore\n from ._importhooks import JsFinder\n from .webloop import WebLoopPolicy\n import asyncio\n@@ -26,4 +26,6 @@\n \"JsException\",\n \"register_js_module\",\n \"unregister_js_module\",\n+ \"create_once_callable\",\n+ \"create_proxy\",\n ]\ndiff --git a/src/pyodide-py/pyodide/_core.py b/src/pyodide-py/pyodide/_core.py\n--- a/src/pyodide-py/pyodide/_core.py\n+++ b/src/pyodide-py/pyodide/_core.py\n@@ -1,7 +1,13 @@\n import platform\n+from typing import Any, Callable\n \n if platform.system() == \"Emscripten\":\n- from _pyodide_core import JsProxy, JsException\n+ from _pyodide_core import (\n+ JsProxy,\n+ JsException,\n+ create_proxy,\n+ create_once_callable,\n+ )\n else:\n # Can add shims here if we are so inclined.\n class JsException(Exception): # type: ignore\n@@ -16,5 +22,23 @@\n \n # Defined in jsproxy.c\n \n+ # Defined in jsproxy.c\n+\n+ def create_once_callable(obj: Callable) -> JsProxy:\n+ \"\"\"Wrap a Python callable in a Javascript function that can be called\n+ once. After being called the proxy will decrement the reference count\n+ of the Callable. The javascript function also has a `destroy` API that\n+ can be used to release the proxy without calling it.\n+ \"\"\"\n+ return obj\n+\n+ def create_proxy(obj: Any) -> JsProxy:\n+ \"\"\"Create a `JsProxy` of a `PyProxy`.\n+\n+ This allows explicit control over the lifetime of the `PyProxy` from\n+ Python: call the `destroy` API when done.\n+ \"\"\"\n+ return obj\n+\n \n-__all__ = [\"JsProxy\", \"JsException\"]\n+__all__ = [\"JsProxy\", \"JsException\", \"create_proxy\", \"create_once_callable\"]\ndiff --git a/src/pyodide-py/pyodide/webloop.py b/src/pyodide-py/pyodide/webloop.py\n--- a/src/pyodide-py/pyodide/webloop.py\n+++ b/src/pyodide-py/pyodide/webloop.py\n@@ -132,11 +132,12 @@\n This uses `setTimeout(callback, delay)`\n \"\"\"\n from js import setTimeout\n+ from . import create_once_callable\n \n if delay < 0:\n raise ValueError(\"Can't schedule in the past\")\n h = asyncio.Handle(callback, args, self, context=context)\n- setTimeout(h._run, delay * 1000)\n+ setTimeout(create_once_callable(h._run), delay * 1000)\n return h\n \n def call_at(\n", "issue": "Potential memory leak in WebLoop.py\nWebLoop.py uses `js.setTimeout` to implement call_* functions. The code is:\r\n\r\n```\r\nh = asyncio.Handle(callback, args, self, context=context)\r\nsetTimeout(h._run, delay * 1000)\r\nreturn h\r\n```\r\n\r\nhttps://github.com/iodide-project/pyodide/blob/de7c3420103fabbf01c589fcf2a429d157a6bd5e/src/pyodide-py/pyodide/webloop.py#L139\r\n\r\nI think this may result in a memory leak, since the `h._run` JS proxy is never released.\n", "before_files": [{"content": "import asyncio\nfrom asyncio import tasks, futures\nimport time\nimport contextvars\n\n\nfrom typing import Callable\n\n\nclass WebLoop(asyncio.AbstractEventLoop):\n \"\"\"A custom event loop for use in Pyodide.\n\n Schedules tasks on the browser event loop. 
Does no lifecycle management and runs\n forever.\n\n ``run_forever`` and ``run_until_complete`` cannot block like a normal event loop would\n because we only have one thread so blocking would stall the browser event loop\n and prevent anything from ever happening.\n\n We defer all work to the browser event loop using the setTimeout function.\n To ensure that this event loop doesn't stall out UI and other browser handling,\n we want to make sure that each task is scheduled on the browser event loop as a\n task not as a microtask. ``setTimeout(callback, 0)`` enqueues the callback as a\n task so it works well for our purposes.\n \"\"\"\n\n def __init__(self):\n self._task_factory = None\n asyncio._set_running_loop(self)\n\n def get_debug(self):\n return False\n\n #\n # Lifecycle methods: We ignore all lifecycle management\n #\n\n def is_running(self) -> bool:\n \"\"\"Returns ``True`` if the event loop is running.\n\n Always returns ``True`` because WebLoop has no lifecycle management.\n \"\"\"\n return True\n\n def is_closed(self) -> bool:\n \"\"\"Returns ``True`` if the event loop was closed.\n\n Always returns ``False`` because WebLoop has no lifecycle management.\n \"\"\"\n return False\n\n def _check_closed(self):\n \"\"\"Used in create_task.\n\n Would raise an error if ``self.is_closed()``, but we are skipping all lifecycle stuff.\n \"\"\"\n pass\n\n def run_forever(self):\n \"\"\"Run the event loop forever. Does nothing in this implementation.\n\n We cannot block like a normal event loop would\n because we only have one thread so blocking would stall the browser event loop\n and prevent anything from ever happening.\n \"\"\"\n pass\n\n def run_until_complete(self, future):\n \"\"\"Run until future is done.\n\n If the argument is a coroutine, it is wrapped in a Task.\n\n The native event loop `run_until_complete` blocks until evaluation of the\n future is complete and then returns the result of the future.\n Since we cannot block, we just ensure that the future is scheduled and\n return the future. This makes this method a bit useless. Instead, use\n `future.add_done_callback(do_something_with_result)` or:\n ```python\n async def wrapper():\n result = await future\n do_something_with_result(result)\n ```\n \"\"\"\n return asyncio.ensure_future(future)\n\n #\n # Scheduling methods: use browser.setTimeout to schedule tasks on the browser event loop.\n #\n\n def call_soon(self, callback: Callable, *args, context: contextvars.Context = None):\n \"\"\"Arrange for a callback to be called as soon as possible.\n\n Any positional arguments after the callback will be passed to\n the callback when it is called.\n\n This schedules the callback on the browser event loop using ``setTimeout(callback, 0)``.\n \"\"\"\n delay = 0\n return self.call_later(delay, callback, *args, context=context)\n\n def call_soon_threadsafe(\n self, callback: Callable, *args, context: contextvars.Context = None\n ):\n \"\"\"Like ``call_soon()``, but thread-safe.\n\n We have no threads so everything is \"thread safe\", and we just use ``call_soon``.\n \"\"\"\n return self.call_soon(callback, *args, context=context)\n\n def call_later(\n self,\n delay: float,\n callback: Callable,\n *args,\n context: contextvars.Context = None\n ):\n \"\"\"Arrange for a callback to be called at a given time.\n\n Return a Handle: an opaque object with a cancel() method that\n can be used to cancel the call.\n\n The delay can be an int or float, expressed in seconds. 
It is\n always relative to the current time.\n\n Each callback will be called exactly once. If two callbacks\n are scheduled for exactly the same time, it undefined which\n will be called first.\n\n Any positional arguments after the callback will be passed to\n the callback when it is called.\n\n This uses `setTimeout(callback, delay)`\n \"\"\"\n from js import setTimeout\n\n if delay < 0:\n raise ValueError(\"Can't schedule in the past\")\n h = asyncio.Handle(callback, args, self, context=context)\n setTimeout(h._run, delay * 1000)\n return h\n\n def call_at(\n self,\n when: float,\n callback: Callable,\n *args,\n context: contextvars.Context = None\n ):\n \"\"\"Like ``call_later()``, but uses an absolute time.\n\n Absolute time corresponds to the event loop's ``time()`` method.\n\n This uses ``setTimeout(callback, when - cur_time)``\n \"\"\"\n cur_time = self.time()\n delay = when - cur_time\n return self.call_later(delay, callback, *args, context=context)\n\n #\n # The remaining methods are copied directly from BaseEventLoop\n #\n\n def time(self):\n \"\"\"Return the time according to the event loop's clock.\n\n This is a float expressed in seconds since an epoch, but the\n epoch, precision, accuracy and drift are unspecified and may\n differ per event loop.\n\n Copied from ``BaseEventLoop.time``\n \"\"\"\n return time.monotonic()\n\n def create_future(self):\n \"\"\"Create a Future object attached to the loop.\n\n Copied from ``BaseEventLoop.create_future``\n \"\"\"\n return futures.Future(loop=self)\n\n def create_task(self, coro, *, name=None):\n \"\"\"Schedule a coroutine object.\n\n Return a task object.\n\n Copied from ``BaseEventLoop.create_task``\n \"\"\"\n self._check_closed()\n if self._task_factory is None:\n task = tasks.Task(coro, loop=self, name=name)\n if task._source_traceback:\n # Added comment:\n # this only happens if get_debug() returns True.\n # In that case, remove create_task from _source_traceback.\n del task._source_traceback[-1]\n else:\n task = self._task_factory(self, coro)\n tasks._set_task_name(task, name)\n\n return task\n\n def set_task_factory(self, factory):\n \"\"\"Set a task factory that will be used by loop.create_task().\n\n If factory is None the default task factory will be set.\n\n If factory is a callable, it should have a signature matching\n '(loop, coro)', where 'loop' will be a reference to the active\n event loop, 'coro' will be a coroutine object. 
The callable\n must return a Future.\n\n Copied from ``BaseEventLoop.set_task_factory``\n \"\"\"\n if factory is not None and not callable(factory):\n raise TypeError(\"task factory must be a callable or None\")\n self._task_factory = factory\n\n def get_task_factory(self):\n \"\"\"Return a task factory, or None if the default one is in use.\n\n Copied from ``BaseEventLoop.get_task_factory``\n \"\"\"\n return self._task_factory\n\n\nclass WebLoopPolicy(asyncio.DefaultEventLoopPolicy): # type: ignore\n \"\"\"\n A simple event loop policy for managing WebLoop based event loops.\n \"\"\"\n\n def __init__(self):\n self._default_loop = None\n\n def get_event_loop(self):\n \"\"\"Get the current event loop\"\"\"\n if self._default_loop:\n return self._default_loop\n return self.new_event_loop()\n\n def new_event_loop(self):\n \"\"\"Create a new event loop\"\"\"\n self._default_loop = WebLoop()\n return self._default_loop\n\n def set_event_loop(self, loop: asyncio.AbstractEventLoop):\n \"\"\"Set the current event loop\"\"\"\n self._default_loop = loop\n", "path": "src/pyodide-py/pyodide/webloop.py"}, {"content": "import platform\n\nif platform.system() == \"Emscripten\":\n from _pyodide_core import JsProxy, JsException\nelse:\n # Can add shims here if we are so inclined.\n class JsException(Exception): # type: ignore\n \"\"\"\n A wrapper around a Javascript Error to allow the Error to be thrown in Python.\n \"\"\"\n\n # Defined in jsproxy.c\n\n class JsProxy: # type: ignore\n \"\"\"A proxy to make a Javascript object behave like a Python object\"\"\"\n\n # Defined in jsproxy.c\n\n\n__all__ = [\"JsProxy\", \"JsException\"]\n", "path": "src/pyodide-py/pyodide/_core.py"}, {"content": "from ._base import open_url, eval_code, eval_code_async, find_imports, as_nested_list\nfrom ._core import JsException # type: ignore\nfrom ._importhooks import JsFinder\nfrom .webloop import WebLoopPolicy\nimport asyncio\nimport sys\nimport platform\n\njsfinder = JsFinder()\nregister_js_module = jsfinder.register_js_module\nunregister_js_module = jsfinder.unregister_js_module\nsys.meta_path.append(jsfinder) # type: ignore\n\nif platform.system() == \"Emscripten\":\n asyncio.set_event_loop_policy(WebLoopPolicy())\n\n\n__version__ = \"0.17.dev0\"\n\n__all__ = [\n \"open_url\",\n \"eval_code\",\n \"eval_code_async\",\n \"find_imports\",\n \"as_nested_list\",\n \"JsException\",\n \"register_js_module\",\n \"unregister_js_module\",\n]\n", "path": "src/pyodide-py/pyodide/__init__.py"}]} | 3,567 | 759 |
gh_patches_debug_2909 | rasdani/github-patches | git_diff | mirumee__ariadne-799 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Support Starlette 0.18.0
Was just released: https://github.com/encode/starlette/releases/tag/0.18.0
and currently the dependency is pinned at `<0.18.0`.
</issue>
<code>
[start of setup.py]
1 #! /usr/bin/env python
2 import os
3 from setuptools import setup
4
5 CLASSIFIERS = [
6 "Development Status :: 4 - Beta",
7 "Intended Audience :: Developers",
8 "License :: OSI Approved :: BSD License",
9 "Operating System :: OS Independent",
10 "Programming Language :: Python",
11 "Programming Language :: Python :: 3.7",
12 "Programming Language :: Python :: 3.8",
13 "Programming Language :: Python :: 3.9",
14 "Programming Language :: Python :: 3.10",
15 "Topic :: Software Development :: Libraries :: Python Modules",
16 ]
17
18 README_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), "README.md")
19 with open(README_PATH, "r", encoding="utf8") as f:
20 README = f.read()
21
22 setup(
23 name="ariadne",
24 author="Mirumee Software",
25 author_email="[email protected]",
26 description="Ariadne is a Python library for implementing GraphQL servers.",
27 long_description=README,
28 long_description_content_type="text/markdown",
29 license="BSD",
30 version="0.15.0.dev3",
31 url="https://github.com/mirumee/ariadne",
32 packages=["ariadne"],
33 include_package_data=True,
34 install_requires=[
35 "graphql-core>=3.2.0,<3.3",
36 "starlette<0.18",
37 "typing_extensions>=3.6.0",
38 ],
39 extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
40 classifiers=CLASSIFIERS,
41 platforms=["any"],
42 zip_safe=False,
43 )
44
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -33,7 +33,7 @@
include_package_data=True,
install_requires=[
"graphql-core>=3.2.0,<3.3",
- "starlette<0.18",
+ "starlette<0.19",
"typing_extensions>=3.6.0",
],
extras_require={"asgi-file-uploads": ["python-multipart>=0.0.5"]},
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -33,7 +33,7 @@\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n- \"starlette<0.18\",\n+ \"starlette<0.19\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n", "issue": "Support Starlette 0.18.0\nWas just released: https://github.com/encode/starlette/releases/tag/0.18.0\r\nand currently the dependency is pinned at `<0.18.0`.\n", "before_files": [{"content": "#! /usr/bin/env python\nimport os\nfrom setuptools import setup\n\nCLASSIFIERS = [\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n]\n\nREADME_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"README.md\")\nwith open(README_PATH, \"r\", encoding=\"utf8\") as f:\n README = f.read()\n\nsetup(\n name=\"ariadne\",\n author=\"Mirumee Software\",\n author_email=\"[email protected]\",\n description=\"Ariadne is a Python library for implementing GraphQL servers.\",\n long_description=README,\n long_description_content_type=\"text/markdown\",\n license=\"BSD\",\n version=\"0.15.0.dev3\",\n url=\"https://github.com/mirumee/ariadne\",\n packages=[\"ariadne\"],\n include_package_data=True,\n install_requires=[\n \"graphql-core>=3.2.0,<3.3\",\n \"starlette<0.18\",\n \"typing_extensions>=3.6.0\",\n ],\n extras_require={\"asgi-file-uploads\": [\"python-multipart>=0.0.5\"]},\n classifiers=CLASSIFIERS,\n platforms=[\"any\"],\n zip_safe=False,\n)\n", "path": "setup.py"}]} | 1,015 | 114 |
gh_patches_debug_2549 | rasdani/github-patches | git_diff | streamlit__streamlit-724 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Fix Danny's S3 sharing issue
It looks like `[s3] keyPrefix=...` isn't making it into the URLs being fetched from S3.
This is the address of a manifest protobuf we want to fetch:
`https://yelp-people-dev.s3-us-west-2.amazonaws.com/~dqn/st/0.49.0-A8NT/reports/NJphBiGR4twz88mU9wTegn/manifest.pb`
And this is the address that's being generated:
`https://yelp-people-dev.s3.amazonaws.com/~dqn/reports/NJphBiGR4twz88mU9wTegn/manifest.pb`
The generated address is missing the `st/<streamlit version>` bits. Looks like we're splitting on a forward slash on the pathname in `ConnectionManager.fetchManifest`, which is giving us the wrong result because the keyPrefix itself has a forward slash.
</issue>
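The code in question is TypeScript (`ConnectionManager.fetchManifest`); the Python sketch below only reproduces the path arithmetic with the URLs from the report above, to show how splitting the pathname on `/` silently drops the `st/<version>` portion of the configured keyPrefix:
```python
from urllib.parse import urljoin

bucket = "https://yelp-people-dev.s3-us-west-2.amazonaws.com/"
key_prefix = "~dqn/st/0.49.0-A8NT/"  # note the embedded '/' characters
report = "reports/NJphBiGR4twz88mU9wTegn/manifest.pb"

# Keeping only the first pathname segment (what a naive split('/') yields)
# loses 'st/0.49.0-A8NT':
broken = urljoin(bucket, key_prefix.split("/")[0] + "/" + report)

# Treating the whole configured keyPrefix as an opaque prefix keeps it intact:
fixed = urljoin(bucket, key_prefix + report)

print(broken)
print(fixed)
```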
<code>
[start of examples/bart_vs_bikes.py]
1 # -*- coding: utf-8 -*-
2 # Copyright 2018-2019 Streamlit Inc.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import copy
17 from urllib.parse import urljoin
18 import pandas as pd
19 import streamlit as st
20
21
22 st.title("BART stops vs. bike rentals")
23
24 st.write(
25 """
26 This plot shows two things:
27 * Bay Area Rapit Transit (BART) train lines plotted as arcs connecting the
28 stations.
29 * A 3D hexagonal histogram plot of bike-sharing rentals (origin locations).
30 """
31 )
32
33
34 @st.cache
35 def from_data_file(filename):
36 dirname = "https://raw.githubusercontent.com/streamlit/streamlit/develop/examples/data/"
37 url = urljoin(dirname, filename)
38 return pd.read_json(url)
39
40
41 # Grab some data
42 bart_stop_stats = copy.deepcopy(from_data_file("bart_stop_stats.json"))
43 bart_path_stats = from_data_file("bart_path_stats.json")
44 bike_rental_stats = from_data_file("bike_rental_stats.json")
45
46 # Move bart stop name to the 1st column, so it looks nicer when printed as a
47 # table.
48 bart_stop_names = bart_stop_stats["name"]
49 bart_stop_stats.drop(labels=["name"], axis=1, inplace=True)
50 bart_stop_stats.insert(0, "name", bart_stop_names)
51
52 st.deck_gl_chart(
53 viewport={"latitude": 37.76, "longitude": -122.4, "zoom": 11, "pitch": 50},
54 layers=[
55 {
56 # Plot number of bike rentals throughtout the city
57 "type": "HexagonLayer",
58 "data": bike_rental_stats,
59 "radius": 200,
60 "elevationScale": 4,
61 "elevationRange": [0, 1000],
62 "pickable": True,
63 "extruded": True,
64 },
65 {
66 # Now plot locations of Bart stops
67 # ...and let's size the stops according to traffic
68 "type": "ScatterplotLayer",
69 "data": bart_stop_stats,
70 "radiusScale": 10,
71 "getRadius": 50,
72 },
73 {
74 # Now Add names of Bart stops
75 "type": "TextLayer",
76 "data": bart_stop_stats,
77 "getText": "name",
78 "getColor": [0, 0, 0, 200],
79 "getSize": 15,
80 },
81 {
82 # And draw some arcs connecting the stops
83 "type": "ArcLayer",
84 "data": bart_path_stats,
85 "pickable": True,
86 "autoHighlight": True,
87 "getStrokeWidth": 10,
88 },
89 ],
90 )
91
[end of examples/bart_vs_bikes.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/examples/bart_vs_bikes.py b/examples/bart_vs_bikes.py
--- a/examples/bart_vs_bikes.py
+++ b/examples/bart_vs_bikes.py
@@ -33,7 +33,9 @@
@st.cache
def from_data_file(filename):
- dirname = "https://raw.githubusercontent.com/streamlit/streamlit/develop/examples/data/"
+ dirname = (
+ "https://raw.githubusercontent.com/streamlit/streamlit/develop/examples/data/"
+ )
url = urljoin(dirname, filename)
return pd.read_json(url)
| {"golden_diff": "diff --git a/examples/bart_vs_bikes.py b/examples/bart_vs_bikes.py\n--- a/examples/bart_vs_bikes.py\n+++ b/examples/bart_vs_bikes.py\n@@ -33,7 +33,9 @@\n \n @st.cache\n def from_data_file(filename):\n- dirname = \"https://raw.githubusercontent.com/streamlit/streamlit/develop/examples/data/\" \n+ dirname = (\n+ \"https://raw.githubusercontent.com/streamlit/streamlit/develop/examples/data/\"\n+ )\n url = urljoin(dirname, filename)\n return pd.read_json(url)\n", "issue": "Fix Danny's S3 sharing issue\nIt looks like `[s3] keyPrefix=...` isn't making it into the URLs being fetched from S3.\r\n\r\nThis is the address of a manifest protobuf we want to fetch:\r\n`https://yelp-people-dev.s3-us-west-2.amazonaws.com/~dqn/st/0.49.0-A8NT/reports/NJphBiGR4twz88mU9wTegn/manifest.pb`\r\n\r\nAnd this is the address that's being generated:\r\n`https://yelp-people-dev.s3.amazonaws.com/~dqn/reports/NJphBiGR4twz88mU9wTegn/manifest.pb`\r\n\r\nThe generated address is missing the `st/<streamlit version>` bits. Looks like we're splitting on a forward slash on the pathname in `ConnectionManager.fetchManifest`, which is giving us the wrong result because the keyPrefix itself has a forward slash.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2019 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nfrom urllib.parse import urljoin\nimport pandas as pd\nimport streamlit as st\n\n\nst.title(\"BART stops vs. 
bike rentals\")\n\nst.write(\n \"\"\"\n This plot shows two things:\n * Bay Area Rapit Transit (BART) train lines plotted as arcs connecting the\n stations.\n * A 3D hexagonal histogram plot of bike-sharing rentals (origin locations).\n\"\"\"\n)\n\n\[email protected]\ndef from_data_file(filename):\n dirname = \"https://raw.githubusercontent.com/streamlit/streamlit/develop/examples/data/\" \n url = urljoin(dirname, filename)\n return pd.read_json(url)\n\n\n# Grab some data\nbart_stop_stats = copy.deepcopy(from_data_file(\"bart_stop_stats.json\"))\nbart_path_stats = from_data_file(\"bart_path_stats.json\")\nbike_rental_stats = from_data_file(\"bike_rental_stats.json\")\n\n# Move bart stop name to the 1st column, so it looks nicer when printed as a\n# table.\nbart_stop_names = bart_stop_stats[\"name\"]\nbart_stop_stats.drop(labels=[\"name\"], axis=1, inplace=True)\nbart_stop_stats.insert(0, \"name\", bart_stop_names)\n\nst.deck_gl_chart(\n viewport={\"latitude\": 37.76, \"longitude\": -122.4, \"zoom\": 11, \"pitch\": 50},\n layers=[\n {\n # Plot number of bike rentals throughtout the city\n \"type\": \"HexagonLayer\",\n \"data\": bike_rental_stats,\n \"radius\": 200,\n \"elevationScale\": 4,\n \"elevationRange\": [0, 1000],\n \"pickable\": True,\n \"extruded\": True,\n },\n {\n # Now plot locations of Bart stops\n # ...and let's size the stops according to traffic\n \"type\": \"ScatterplotLayer\",\n \"data\": bart_stop_stats,\n \"radiusScale\": 10,\n \"getRadius\": 50,\n },\n {\n # Now Add names of Bart stops\n \"type\": \"TextLayer\",\n \"data\": bart_stop_stats,\n \"getText\": \"name\",\n \"getColor\": [0, 0, 0, 200],\n \"getSize\": 15,\n },\n {\n # And draw some arcs connecting the stops\n \"type\": \"ArcLayer\",\n \"data\": bart_path_stats,\n \"pickable\": True,\n \"autoHighlight\": True,\n \"getStrokeWidth\": 10,\n },\n ],\n)\n", "path": "examples/bart_vs_bikes.py"}]} | 1,615 | 124 |
gh_patches_debug_39456 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1288 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
✨ `IS` filters should be prioritized just after `Eq` filters in the optimizer
### Thanks for stopping by to let us know something could be better!
**Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]_
**Describe the solution you'd like** _A clear and concise description of what you want to happen._
**Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._
**Additional context** _Add any other context or screenshots about the feature request here._
</issue>
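For reference, the change being asked for amounts to giving `IS` / `IS NOT` predicates a weight that slots in right behind the equality operators in the predicate-ordering heuristic (see `NODE_ORDER` and `_tag_predicates` in the listing below). A minimal sketch of the idea, using made-up weights and the convention that a lower weight means "evaluate earlier"; the project's own constants and sort direction may differ:

```python
# Illustrative constants only; the relative order is the point, not the numbers.
ILLUSTRATIVE_ORDER = {
    "Eq": 1, "NotEq": 1,          # equality: cheap and usually very selective
    "IsNull": 2, "IsNotNull": 2,  # IS / IS NOT: a simple null check, run next
    "Gt": 3, "GtEq": 3, "Lt": 3, "LtEq": 3,
    "Like": 5, "ILike": 5,
}

def order_filters(filters):
    """filters: iterable of (operator_name, predicate_text) pairs."""
    return sorted(filters, key=lambda f: ILLUSTRATIVE_ORDER.get(f[0], 9))

# IS NOT NULL lands between the equality check and the range check:
print(order_filters([("Gt", "a > 3"), ("IsNotNull", "b IS NOT NULL"), ("Eq", "c = 1")]))
```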
<code>
[start of opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py]
1 from orso.tools import random_string
2
3 from opteryx.components.logical_planner import LogicalPlan
4 from opteryx.components.logical_planner import LogicalPlanNode
5 from opteryx.components.logical_planner import LogicalPlanStepType
6 from opteryx.managers.expression import NodeType
7 from opteryx.managers.expression import get_all_nodes_of_type
8
9 from .optimization_strategy import HeuristicOptimizerContext
10 from .optimization_strategy import OptimizationStrategy
11
12 NODE_ORDER = {
13 "Eq": 1,
14 "NotEq": 1,
15 "Gt": 2,
16 "GtEq": 2,
17 "Lt": 2,
18 "LtEq": 2,
19 "Like": 4,
20 "ILike": 4,
21 "NotLike": 4,
22 "NotILike": 4,
23 }
24
25
26 def _tag_predicates(nodes):
27 """
28 Here we add tags to the predicates to assist with optimization.
29
30 Weighting of predicates based on naive rules, this is mostly useful for situations where
31 we do not have statistics to make cost-based decisions later. We're going to start with
32 arbitrary numbers, we need to find a way to refine these over time. The logic is
33 roughly:
34 - 35 is something that is expensive (we're running function)
35 - 32 is where we're doing a complex comparison
36 ...
37 - 3 is doing an eqi comparison on two literals (don't actually do that in a filter)
38 """
39
40 for node in nodes:
41 node.weight = 0
42 node.simple = True
43 node.relations = set()
44
45 if not node.condition.node_type == NodeType.COMPARISON_OPERATOR:
46 node.weight += 35
47 node.simple = False
48 continue
49 node.weight = NODE_ORDER.get(node.condition.value, 12)
50 if node.condition.left.node_type == NodeType.LITERAL:
51 node.weight += 1
52 elif node.condition.left.node_type == NodeType.IDENTIFIER:
53 node.weight += 3
54 node.relations.add(node.condition.left.source)
55 else:
56 node.weight += 10
57 node.simple = False
58 if node.condition.right.node_type == NodeType.LITERAL:
59 node.weight += 1
60 elif node.condition.right.node_type == NodeType.IDENTIFIER:
61 node.weight += 3
62 node.relations.add(node.condition.right.source)
63 else:
64 node.weight += 10
65 node.simple = False
66
67 return sorted(nodes, key=lambda node: node.weight, reverse=True)
68
69
70 def _inner_split(node):
71 if node.node_type != NodeType.AND:
72 return [node]
73
74 # get the left and right filters
75 left_nodes = _inner_split(node.left)
76 right_nodes = _inner_split(node.right)
77
78 return left_nodes + right_nodes
79
80
81 def _unique_nodes(nodes: list) -> list:
82 seen_identities = {}
83
84 for node in nodes:
85 if node.condition:
86 identity = node.condition.schema_column.identity
87 if identity not in seen_identities:
88 # if it's the first time we've seen it, capture it
89 seen_identities[identity] = node
90 elif node.condition.left.schema_column and node.condition.right.schema_column:
91 # if we're seeing it again AND it has left and right columns, it's probably
92 # the complete node so replace what we captured before
93 seen_identities[identity] = node
94
95 return list(seen_identities.values())
96
97
98 class SplitConjunctivePredicatesStrategy(OptimizationStrategy):
99 def visit(
100 self, node: LogicalPlanNode, context: HeuristicOptimizerContext
101 ) -> HeuristicOptimizerContext:
102 """
103 Conjunctive Predicates (ANDs) can be split and executed in any order to get the
104 same result. This means we can split them into separate steps in the plan.
105
106 The reason for splitting is two-fold:
107
108 1) Smaller expressions are easier to move around the query plan as they have fewer
109 dependencies.
110 2) Executing predicates like this means each runs in turn, filtering out some of
111 the records meaning susequent predicates will be operating on fewer records,
112 which is generally faster. We can also order these predicates to get a faster
113 result, balancing the selectivity (get rid of more records faster) vs cost of
114 the check (a numeric check is faster than a string check)
115 """
116 if node.node_type == LogicalPlanStepType.Filter:
117 split_predicates = _inner_split(node.condition)
118 new_nodes = []
119 for predicate in split_predicates:
120 new_node = LogicalPlanNode(
121 node_type=LogicalPlanStepType.Filter, condition=predicate
122 )
123 new_node.columns = get_all_nodes_of_type(
124 node.condition, select_nodes=(NodeType.IDENTIFIER,)
125 )
126 new_nodes.append(new_node)
127 new_nodes = _unique_nodes(new_nodes)
128 new_nodes = _tag_predicates(new_nodes)
129 else:
130 new_nodes = [node]
131
132 for i, new_node in enumerate(new_nodes):
133 nid = random_string() if (i + 1) < len(new_nodes) else context.node_id
134 context.optimized_plan.add_node(nid, LogicalPlanNode(**new_node.properties))
135 if context.parent_nid:
136 context.optimized_plan.add_edge(nid, context.parent_nid)
137 context.parent_nid = nid
138
139 return context
140
141 def complete(self, plan: LogicalPlan, context: HeuristicOptimizerContext) -> LogicalPlan:
142 # No finalization needed for this strategy
143 return plan
144
[end of opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py]
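To make the weighting in `_tag_predicates` above concrete, the arithmetic for a few representative predicates works out as follows, using only the constants visible in the listing (how the planner consumes the sorted result is not implied here):

```python
# column = literal           -> NODE_ORDER["Eq"]   (1) + identifier (3) + literal (1) = 5
# column LIKE literal        -> NODE_ORDER["Like"] (4) + identifier (3) + literal (1) = 8
# column = some_function(x)  -> NODE_ORDER["Eq"]   (1) + identifier (3) + other  (10) = 14, simple=False
# not a comparison at all    -> flat 35, simple=False
#
# sorted(nodes, key=lambda n: n.weight, reverse=True) then returns these
# predicates in weight order [35, 14, 8, 5].
```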
[start of opteryx/connectors/sql_connector.py]
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 """
14
15 """
16 from decimal import Decimal
17
18 from orso import DataFrame
19 from orso.schema import FlatColumn
20 from orso.schema import RelationSchema
21 from orso.types import PYTHON_TO_ORSO_MAP
22
23 from opteryx.connectors.base.base_connector import DEFAULT_MORSEL_SIZE
24 from opteryx.connectors.base.base_connector import INITIAL_CHUNK_SIZE
25 from opteryx.connectors.base.base_connector import BaseConnector
26 from opteryx.exceptions import MissingDependencyError
27 from opteryx.exceptions import UnmetRequirementError
28 from opteryx.third_party.query_builder import Query
29
30
31 class SqlConnector(BaseConnector):
32 __mode__ = "Sql"
33
34 def __init__(self, *args, connection: str = None, engine=None, **kwargs):
35 super().__init__(*args, **kwargs)
36 try:
37 from sqlalchemy import MetaData
38 from sqlalchemy import create_engine
39 except ImportError as err: # pragma: nocover
40 raise MissingDependencyError(err.name) from err
41
42 if engine is None and connection is None:
43 raise UnmetRequirementError(
44 "SQL Connections require either a SQL Alchemy connection string in the 'connection' parameter, or a SQL Alchemy Engine in the 'engine' parameter."
45 )
46
47 # create the SqlAlchemy engine
48 if engine is None:
49 self._engine = create_engine(connection)
50 else:
51 self._engine = engine
52
53 self.schema = None
54 self.metadata = MetaData()
55
56 def read_dataset(
57 self, columns: list = None, chunk_size: int = INITIAL_CHUNK_SIZE
58 ) -> "DatasetReader":
59 self.chunk_size = chunk_size
60 result_schema = self.schema
61
62 query_builder = Query().FROM(self.dataset)
63
64 # if we're projecting, update the SQL and the target morsel schema
65 if columns:
66 column_names = [col.name for col in columns]
67 query_builder.add("SELECT", *column_names)
68 result_schema.columns = [col for col in self.schema.columns if col.name in column_names]
69 else:
70 query_builder.add("SELECT", "*")
71
72 morsel = DataFrame(schema=result_schema)
73
74 with self._engine.connect() as conn:
75 # DEBUG: log ("READ DATASET\n", str(query_builder))
76 for row in conn.execute(str(query_builder)):
77 morsel._rows.append(row)
78 if len(morsel) == self.chunk_size:
79 yield morsel.arrow()
80
81 if morsel.nbytes > 0:
82 self.chunk_size = int(len(morsel) // (morsel.nbytes / DEFAULT_MORSEL_SIZE))
83
84 morsel = DataFrame(schema=result_schema)
85
86 if len(morsel) > 0:
87 yield morsel.arrow()
88
89 def get_dataset_schema(self) -> RelationSchema:
90 from sqlalchemy import Table
91
92 if self.schema:
93 return self.schema
94
95 # Try to read the schema from the metastore
96 self.schema = self.read_schema_from_metastore()
97 if self.schema:
98 return self.schema
99
100 # get the schema from the dataset
101 # DEBUG: log ("GET SQL SCHEMA:", self.dataset)
102 table = Table(self.dataset, self.metadata, autoload_with=self._engine)
103
104 self.schema = RelationSchema(
105 name=table.name,
106 columns=[
107 FlatColumn(
108 name=column.name,
109 type=PYTHON_TO_ORSO_MAP[column.type.python_type],
110 precision=None if column.type.python_type != Decimal else column.type.precision,
111 scale=None if column.type.python_type != Decimal else column.type.scale,
112 nullable=column.nullable,
113 )
114 for column in table.columns
115 ],
116 )
117
118 return self.schema
119
[end of opteryx/connectors/sql_connector.py]
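The adaptive chunking in `read_dataset` above rescales the row count after each full chunk so that a yielded morsel lands near `DEFAULT_MORSEL_SIZE` bytes. A rough worked example, with numbers made up purely for illustration:

```python
# Suppose the first full chunk holds 500 rows and measures 8 MiB, and the
# target DEFAULT_MORSEL_SIZE is 64 MiB:
#
#   chunk_size = int(len(morsel) // (morsel.nbytes / DEFAULT_MORSEL_SIZE))
#              = int(500 // (8 / 64))
#              = int(500 // 0.125)
#              = 4000      # subsequent chunks carry roughly 4000 rows each
```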
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py b/opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py
--- a/opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py
+++ b/opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py
@@ -34,6 +34,8 @@
- 35 is something that is expensive (we're running function)
- 32 is where we're doing a complex comparison
...
+ - 7 are IS/IS NOT filters
+ - 5 is doing an eqi comparison on a column and a literal
- 3 is doing an eqi comparison on two literals (don't actually do that in a filter)
"""
@@ -42,6 +44,10 @@
node.simple = True
node.relations = set()
+ if node.condition.node_type == NodeType.UNARY_OPERATOR:
+ # these are IS/IS NOT filters
+ node.weight += 7
+ continue
if not node.condition.node_type == NodeType.COMPARISON_OPERATOR:
node.weight += 35
node.simple = False
diff --git a/opteryx/connectors/sql_connector.py b/opteryx/connectors/sql_connector.py
--- a/opteryx/connectors/sql_connector.py
+++ b/opteryx/connectors/sql_connector.py
@@ -99,20 +99,39 @@
# get the schema from the dataset
# DEBUG: log ("GET SQL SCHEMA:", self.dataset)
- table = Table(self.dataset, self.metadata, autoload_with=self._engine)
-
- self.schema = RelationSchema(
- name=table.name,
- columns=[
- FlatColumn(
- name=column.name,
- type=PYTHON_TO_ORSO_MAP[column.type.python_type],
- precision=None if column.type.python_type != Decimal else column.type.precision,
- scale=None if column.type.python_type != Decimal else column.type.scale,
- nullable=column.nullable,
+ try:
+ table = Table(self.dataset, self.metadata, autoload_with=self._engine)
+
+ self.schema = RelationSchema(
+ name=table.name,
+ columns=[
+ FlatColumn(
+ name=column.name,
+ type=PYTHON_TO_ORSO_MAP[column.type.python_type],
+ precision=None
+ if column.type.python_type != Decimal
+ else column.type.precision,
+ scale=None if column.type.python_type != Decimal else column.type.scale,
+ nullable=column.nullable,
+ )
+ for column in table.columns
+ ],
+ )
+ except Exception as err:
+ # Fall back to getting the schema from the first row, this is the column names, and where
+ # possible, column types.
+ # DEBUG: log ("APPROXIMATING SCHEMA OF {self.dataset} BECAUSE OF {err}")
+ with self._engine.connect() as conn:
+ row = conn.execute(f"SELECT * FROM `{self.dataset}`").fetchone()
+ self.schema = RelationSchema(
+ name=self.dataset,
+ columns=[
+ FlatColumn(
+ name=column,
+ type=0 if value is None else PYTHON_TO_ORSO_MAP[type(value)],
+ )
+ for column, value in row.items()
+ ],
)
- for column in table.columns
- ],
- )
return self.schema
| {"golden_diff": "diff --git a/opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py b/opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py\n--- a/opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py\n+++ b/opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py\n@@ -34,6 +34,8 @@\n - 35 is something that is expensive (we're running function)\n - 32 is where we're doing a complex comparison\n ...\n+ - 7 are IS/IS NOT filters\n+ - 5 is doing an eqi comparison on a column and a literal\n - 3 is doing an eqi comparison on two literals (don't actually do that in a filter)\n \"\"\"\n \n@@ -42,6 +44,10 @@\n node.simple = True\n node.relations = set()\n \n+ if node.condition.node_type == NodeType.UNARY_OPERATOR:\n+ # these are IS/IS NOT filters\n+ node.weight += 7\n+ continue\n if not node.condition.node_type == NodeType.COMPARISON_OPERATOR:\n node.weight += 35\n node.simple = False\ndiff --git a/opteryx/connectors/sql_connector.py b/opteryx/connectors/sql_connector.py\n--- a/opteryx/connectors/sql_connector.py\n+++ b/opteryx/connectors/sql_connector.py\n@@ -99,20 +99,39 @@\n \n # get the schema from the dataset\n # DEBUG: log (\"GET SQL SCHEMA:\", self.dataset)\n- table = Table(self.dataset, self.metadata, autoload_with=self._engine)\n-\n- self.schema = RelationSchema(\n- name=table.name,\n- columns=[\n- FlatColumn(\n- name=column.name,\n- type=PYTHON_TO_ORSO_MAP[column.type.python_type],\n- precision=None if column.type.python_type != Decimal else column.type.precision,\n- scale=None if column.type.python_type != Decimal else column.type.scale,\n- nullable=column.nullable,\n+ try:\n+ table = Table(self.dataset, self.metadata, autoload_with=self._engine)\n+\n+ self.schema = RelationSchema(\n+ name=table.name,\n+ columns=[\n+ FlatColumn(\n+ name=column.name,\n+ type=PYTHON_TO_ORSO_MAP[column.type.python_type],\n+ precision=None\n+ if column.type.python_type != Decimal\n+ else column.type.precision,\n+ scale=None if column.type.python_type != Decimal else column.type.scale,\n+ nullable=column.nullable,\n+ )\n+ for column in table.columns\n+ ],\n+ )\n+ except Exception as err:\n+ # Fall back to getting the schema from the first row, this is the column names, and where\n+ # possible, column types.\n+ # DEBUG: log (\"APPROXIMATING SCHEMA OF {self.dataset} BECAUSE OF {err}\")\n+ with self._engine.connect() as conn:\n+ row = conn.execute(f\"SELECT * FROM `{self.dataset}`\").fetchone()\n+ self.schema = RelationSchema(\n+ name=self.dataset,\n+ columns=[\n+ FlatColumn(\n+ name=column,\n+ type=0 if value is None else PYTHON_TO_ORSO_MAP[type(value)],\n+ )\n+ for column, value in row.items()\n+ ],\n )\n- for column in table.columns\n- ],\n- )\n \n return self.schema\n", "issue": "\u2728 `IS` filters should be prioritized just after `Eq` filters in the optimizer\n### Thanks for stopping by to let us know something could be better!\r\n\r\n**Is your feature request related to a problem? Please describe.** _A clear and concise description of what the problem is. Ex. 
I'm always frustrated when [...]_\r\n\r\n**Describe the solution you'd like** _A clear and concise description of what you want to happen._\r\n\r\n**Describe alternatives you've considered** _A clear and concise description of any alternative solutions or features you've considered._\r\n\r\n**Additional context** _Add any other context or screenshots about the feature request here._\r\n\n", "before_files": [{"content": "from orso.tools import random_string\n\nfrom opteryx.components.logical_planner import LogicalPlan\nfrom opteryx.components.logical_planner import LogicalPlanNode\nfrom opteryx.components.logical_planner import LogicalPlanStepType\nfrom opteryx.managers.expression import NodeType\nfrom opteryx.managers.expression import get_all_nodes_of_type\n\nfrom .optimization_strategy import HeuristicOptimizerContext\nfrom .optimization_strategy import OptimizationStrategy\n\nNODE_ORDER = {\n \"Eq\": 1,\n \"NotEq\": 1,\n \"Gt\": 2,\n \"GtEq\": 2,\n \"Lt\": 2,\n \"LtEq\": 2,\n \"Like\": 4,\n \"ILike\": 4,\n \"NotLike\": 4,\n \"NotILike\": 4,\n}\n\n\ndef _tag_predicates(nodes):\n \"\"\"\n Here we add tags to the predicates to assist with optimization.\n\n Weighting of predicates based on naive rules, this is mostly useful for situations where\n we do not have statistics to make cost-based decisions later. We're going to start with\n arbitrary numbers, we need to find a way to refine these over time. The logic is\n roughly:\n - 35 is something that is expensive (we're running function)\n - 32 is where we're doing a complex comparison\n ...\n - 3 is doing an eqi comparison on two literals (don't actually do that in a filter)\n \"\"\"\n\n for node in nodes:\n node.weight = 0\n node.simple = True\n node.relations = set()\n\n if not node.condition.node_type == NodeType.COMPARISON_OPERATOR:\n node.weight += 35\n node.simple = False\n continue\n node.weight = NODE_ORDER.get(node.condition.value, 12)\n if node.condition.left.node_type == NodeType.LITERAL:\n node.weight += 1\n elif node.condition.left.node_type == NodeType.IDENTIFIER:\n node.weight += 3\n node.relations.add(node.condition.left.source)\n else:\n node.weight += 10\n node.simple = False\n if node.condition.right.node_type == NodeType.LITERAL:\n node.weight += 1\n elif node.condition.right.node_type == NodeType.IDENTIFIER:\n node.weight += 3\n node.relations.add(node.condition.right.source)\n else:\n node.weight += 10\n node.simple = False\n\n return sorted(nodes, key=lambda node: node.weight, reverse=True)\n\n\ndef _inner_split(node):\n if node.node_type != NodeType.AND:\n return [node]\n\n # get the left and right filters\n left_nodes = _inner_split(node.left)\n right_nodes = _inner_split(node.right)\n\n return left_nodes + right_nodes\n\n\ndef _unique_nodes(nodes: list) -> list:\n seen_identities = {}\n\n for node in nodes:\n if node.condition:\n identity = node.condition.schema_column.identity\n if identity not in seen_identities:\n # if it's the first time we've seen it, capture it\n seen_identities[identity] = node\n elif node.condition.left.schema_column and node.condition.right.schema_column:\n # if we're seeing it again AND it has left and right columns, it's probably\n # the complete node so replace what we captured before\n seen_identities[identity] = node\n\n return list(seen_identities.values())\n\n\nclass SplitConjunctivePredicatesStrategy(OptimizationStrategy):\n def visit(\n self, node: LogicalPlanNode, context: HeuristicOptimizerContext\n ) -> HeuristicOptimizerContext:\n \"\"\"\n Conjunctive Predicates (ANDs) can be 
split and executed in any order to get the\n same result. This means we can split them into separate steps in the plan.\n\n The reason for splitting is two-fold:\n\n 1) Smaller expressions are easier to move around the query plan as they have fewer\n dependencies.\n 2) Executing predicates like this means each runs in turn, filtering out some of\n the records meaning susequent predicates will be operating on fewer records,\n which is generally faster. We can also order these predicates to get a faster\n result, balancing the selectivity (get rid of more records faster) vs cost of\n the check (a numeric check is faster than a string check)\n \"\"\"\n if node.node_type == LogicalPlanStepType.Filter:\n split_predicates = _inner_split(node.condition)\n new_nodes = []\n for predicate in split_predicates:\n new_node = LogicalPlanNode(\n node_type=LogicalPlanStepType.Filter, condition=predicate\n )\n new_node.columns = get_all_nodes_of_type(\n node.condition, select_nodes=(NodeType.IDENTIFIER,)\n )\n new_nodes.append(new_node)\n new_nodes = _unique_nodes(new_nodes)\n new_nodes = _tag_predicates(new_nodes)\n else:\n new_nodes = [node]\n\n for i, new_node in enumerate(new_nodes):\n nid = random_string() if (i + 1) < len(new_nodes) else context.node_id\n context.optimized_plan.add_node(nid, LogicalPlanNode(**new_node.properties))\n if context.parent_nid:\n context.optimized_plan.add_edge(nid, context.parent_nid)\n context.parent_nid = nid\n\n return context\n\n def complete(self, plan: LogicalPlan, context: HeuristicOptimizerContext) -> LogicalPlan:\n # No finalization needed for this strategy\n return plan\n", "path": "opteryx/components/heuristic_optimizer/strategies/split_conjunctive_predicates.py"}, {"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\n\"\"\"\nfrom decimal import Decimal\n\nfrom orso import DataFrame\nfrom orso.schema import FlatColumn\nfrom orso.schema import RelationSchema\nfrom orso.types import PYTHON_TO_ORSO_MAP\n\nfrom opteryx.connectors.base.base_connector import DEFAULT_MORSEL_SIZE\nfrom opteryx.connectors.base.base_connector import INITIAL_CHUNK_SIZE\nfrom opteryx.connectors.base.base_connector import BaseConnector\nfrom opteryx.exceptions import MissingDependencyError\nfrom opteryx.exceptions import UnmetRequirementError\nfrom opteryx.third_party.query_builder import Query\n\n\nclass SqlConnector(BaseConnector):\n __mode__ = \"Sql\"\n\n def __init__(self, *args, connection: str = None, engine=None, **kwargs):\n super().__init__(*args, **kwargs)\n try:\n from sqlalchemy import MetaData\n from sqlalchemy import create_engine\n except ImportError as err: # pragma: nocover\n raise MissingDependencyError(err.name) from err\n\n if engine is None and connection is None:\n raise UnmetRequirementError(\n \"SQL Connections require either a SQL Alchemy connection string in the 'connection' parameter, or a SQL Alchemy Engine in the 'engine' parameter.\"\n )\n\n # create the SqlAlchemy engine\n if engine is None:\n self._engine = create_engine(connection)\n else:\n 
self._engine = engine\n\n self.schema = None\n self.metadata = MetaData()\n\n def read_dataset(\n self, columns: list = None, chunk_size: int = INITIAL_CHUNK_SIZE\n ) -> \"DatasetReader\":\n self.chunk_size = chunk_size\n result_schema = self.schema\n\n query_builder = Query().FROM(self.dataset)\n\n # if we're projecting, update the SQL and the target morsel schema\n if columns:\n column_names = [col.name for col in columns]\n query_builder.add(\"SELECT\", *column_names)\n result_schema.columns = [col for col in self.schema.columns if col.name in column_names]\n else:\n query_builder.add(\"SELECT\", \"*\")\n\n morsel = DataFrame(schema=result_schema)\n\n with self._engine.connect() as conn:\n # DEBUG: log (\"READ DATASET\\n\", str(query_builder))\n for row in conn.execute(str(query_builder)):\n morsel._rows.append(row)\n if len(morsel) == self.chunk_size:\n yield morsel.arrow()\n\n if morsel.nbytes > 0:\n self.chunk_size = int(len(morsel) // (morsel.nbytes / DEFAULT_MORSEL_SIZE))\n\n morsel = DataFrame(schema=result_schema)\n\n if len(morsel) > 0:\n yield morsel.arrow()\n\n def get_dataset_schema(self) -> RelationSchema:\n from sqlalchemy import Table\n\n if self.schema:\n return self.schema\n\n # Try to read the schema from the metastore\n self.schema = self.read_schema_from_metastore()\n if self.schema:\n return self.schema\n\n # get the schema from the dataset\n # DEBUG: log (\"GET SQL SCHEMA:\", self.dataset)\n table = Table(self.dataset, self.metadata, autoload_with=self._engine)\n\n self.schema = RelationSchema(\n name=table.name,\n columns=[\n FlatColumn(\n name=column.name,\n type=PYTHON_TO_ORSO_MAP[column.type.python_type],\n precision=None if column.type.python_type != Decimal else column.type.precision,\n scale=None if column.type.python_type != Decimal else column.type.scale,\n nullable=column.nullable,\n )\n for column in table.columns\n ],\n )\n\n return self.schema\n", "path": "opteryx/connectors/sql_connector.py"}]} | 3,402 | 776 |
gh_patches_debug_54233 | rasdani/github-patches | git_diff | celery__celery-6599 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
celery worker -q --quiet seems not working with 4.4.7
Running celery 4.4.7 [not working]
Running celery 4.4.6 [ok]
Running celery 4.4.1 [ok]
commands :
> celery -A proj worker --loglevel=info -P eventlet --quiet
> celery -A proj worker --loglevel=info -P eventlet -q
</issue>
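The report suggests the flag is parsed but never reaches the worker. In a click-based CLI like the one in the listing below, a global option such as `--quiet` typically lives on the shared context object (`ctx.obj`) and has to be forwarded explicitly when the worker is constructed, the same way `no_color=ctx.obj.no_color` is forwarded to `app.Worker(...)`. A minimal, generic sketch of that pattern (not celery's actual code; `start_worker` is a stand-in):

```python
import click

def start_worker(quiet=False):
    # Stand-in for the real worker bootstrap.
    if not quiet:
        print("banner and startup output ...")

@click.group()
@click.option("-q", "--quiet", is_flag=True, default=False)
@click.pass_context
def cli(ctx, quiet):
    ctx.ensure_object(dict)
    ctx.obj["quiet"] = quiet      # parsed once, stored on the shared context

@cli.command()
@click.pass_context
def worker(ctx):
    # If this keyword is dropped, --quiet parses fine but is silently ignored.
    start_worker(quiet=ctx.obj["quiet"])

if __name__ == "__main__":
    cli()
```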
<code>
[start of celery/bin/worker.py]
1 """Program used to start a Celery worker instance."""
2
3 import os
4 import sys
5
6 import click
7 from click import ParamType
8 from click.types import StringParamType
9
10 from celery import concurrency
11 from celery.bin.base import (COMMA_SEPARATED_LIST, LOG_LEVEL,
12 CeleryDaemonCommand, CeleryOption,
13 handle_preload_options)
14 from celery.exceptions import SecurityError
15 from celery.platforms import (EX_FAILURE, EX_OK, detached,
16 maybe_drop_privileges)
17 from celery.utils.log import get_logger
18 from celery.utils.nodenames import default_nodename, host_format, node_format
19
20 logger = get_logger(__name__)
21
22
23 class CeleryBeat(ParamType):
24 """Celery Beat flag."""
25
26 name = "beat"
27
28 def convert(self, value, param, ctx):
29 if ctx.obj.app.IS_WINDOWS and value:
30 self.fail('-B option does not work on Windows. '
31 'Please run celery beat as a separate service.')
32
33 return value
34
35
36 class WorkersPool(click.Choice):
37 """Workers pool option."""
38
39 name = "pool"
40
41 def __init__(self):
42 """Initialize the workers pool option with the relevant choices."""
43 super().__init__(('prefork', 'eventlet', 'gevent', 'solo'))
44
45 def convert(self, value, param, ctx):
46 # Pools like eventlet/gevent needs to patch libs as early
47 # as possible.
48 return concurrency.get_implementation(
49 value) or ctx.obj.app.conf.worker_pool
50
51
52 class Hostname(StringParamType):
53 """Hostname option."""
54
55 name = "hostname"
56
57 def convert(self, value, param, ctx):
58 return host_format(default_nodename(value))
59
60
61 class Autoscale(ParamType):
62 """Autoscaling parameter."""
63
64 name = "<min workers>, <max workers>"
65
66 def convert(self, value, param, ctx):
67 value = value.split(',')
68
69 if len(value) > 2:
70 self.fail("Expected two comma separated integers or one integer."
71 f"Got {len(value)} instead.")
72
73 if len(value) == 1:
74 try:
75 value = (int(value[0]), 0)
76 except ValueError:
77 self.fail(f"Expected an integer. Got {value} instead.")
78
79 try:
80 return tuple(reversed(sorted(map(int, value))))
81 except ValueError:
82 self.fail("Expected two comma separated integers."
83 f"Got {value.join(',')} instead.")
84
85
86 CELERY_BEAT = CeleryBeat()
87 WORKERS_POOL = WorkersPool()
88 HOSTNAME = Hostname()
89 AUTOSCALE = Autoscale()
90
91 C_FAKEFORK = os.environ.get('C_FAKEFORK')
92
93
94 def detach(path, argv, logfile=None, pidfile=None, uid=None,
95 gid=None, umask=None, workdir=None, fake=False, app=None,
96 executable=None, hostname=None):
97 """Detach program by argv."""
98 fake = 1 if C_FAKEFORK else fake
99 # `detached()` will attempt to touch the logfile to confirm that error
100 # messages won't be lost after detaching stdout/err, but this means we need
101 # to pre-format it rather than relying on `setup_logging_subsystem()` like
102 # we can elsewhere.
103 logfile = node_format(logfile, hostname)
104 with detached(logfile, pidfile, uid, gid, umask, workdir, fake,
105 after_forkers=False):
106 try:
107 if executable is not None:
108 path = executable
109 os.execv(path, [path] + argv)
110 return EX_OK
111 except Exception: # pylint: disable=broad-except
112 if app is None:
113 from celery import current_app
114 app = current_app
115 app.log.setup_logging_subsystem(
116 'ERROR', logfile, hostname=hostname)
117 logger.critical("Can't exec %r", ' '.join([path] + argv),
118 exc_info=True)
119 return EX_FAILURE
120
121
122 @click.command(cls=CeleryDaemonCommand,
123 context_settings={'allow_extra_args': True})
124 @click.option('-n',
125 '--hostname',
126 default=host_format(default_nodename(None)),
127 cls=CeleryOption,
128 type=HOSTNAME,
129 help_group="Worker Options",
130 help="Set custom hostname (e.g., 'w1@%%h'). "
131 "Expands: %%h (hostname), %%n (name) and %%d, (domain).")
132 @click.option('-D',
133 '--detach',
134 cls=CeleryOption,
135 is_flag=True,
136 default=False,
137 help_group="Worker Options",
138 help="Start worker as a background process.")
139 @click.option('-S',
140 '--statedb',
141 cls=CeleryOption,
142 type=click.Path(),
143 callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_state_db,
144 help_group="Worker Options",
145 help="Path to the state database. The extension '.db' may be "
146 "appended to the filename.")
147 @click.option('-l',
148 '--loglevel',
149 default='WARNING',
150 cls=CeleryOption,
151 type=LOG_LEVEL,
152 help_group="Worker Options",
153 help="Logging level.")
154 @click.option('optimization',
155 '-O',
156 default='default',
157 cls=CeleryOption,
158 type=click.Choice(('default', 'fair')),
159 help_group="Worker Options",
160 help="Apply optimization profile.")
161 @click.option('--prefetch-multiplier',
162 type=int,
163 metavar="<prefetch multiplier>",
164 callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_prefetch_multiplier,
165 cls=CeleryOption,
166 help_group="Worker Options",
167 help="Set custom prefetch multiplier value"
168 "for this worker instance.")
169 @click.option('-c',
170 '--concurrency',
171 type=int,
172 metavar="<concurrency>",
173 callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_concurrency,
174 cls=CeleryOption,
175 help_group="Pool Options",
176 help="Number of child processes processing the queue. "
177 "The default is the number of CPUs available"
178 "on your system.")
179 @click.option('-P',
180 '--pool',
181 default='prefork',
182 type=WORKERS_POOL,
183 cls=CeleryOption,
184 help_group="Pool Options",
185 help="Pool implementation.")
186 @click.option('-E',
187 '--task-events',
188 '--events',
189 is_flag=True,
190 cls=CeleryOption,
191 help_group="Pool Options",
192 help="Send task-related events that can be captured by monitors"
193 " like celery events, celerymon, and others.")
194 @click.option('--time-limit',
195 type=float,
196 cls=CeleryOption,
197 help_group="Pool Options",
198 help="Enables a hard time limit "
199 "(in seconds int/float) for tasks.")
200 @click.option('--soft-time-limit',
201 type=float,
202 cls=CeleryOption,
203 help_group="Pool Options",
204 help="Enables a soft time limit "
205 "(in seconds int/float) for tasks.")
206 @click.option('--max-tasks-per-child',
207 type=int,
208 cls=CeleryOption,
209 help_group="Pool Options",
210 help="Maximum number of tasks a pool worker can execute before "
211 "it's terminated and replaced by a new worker.")
212 @click.option('--max-memory-per-child',
213 type=int,
214 cls=CeleryOption,
215 help_group="Pool Options",
216 help="Maximum amount of resident memory, in KiB, that may be "
217 "consumed by a child process before it will be replaced "
218 "by a new one. If a single task causes a child process "
219 "to exceed this limit, the task will be completed and "
220 "the child process will be replaced afterwards.\n"
221 "Default: no limit.")
222 @click.option('--purge',
223 '--discard',
224 is_flag=True,
225 cls=CeleryOption,
226 help_group="Queue Options")
227 @click.option('--queues',
228 '-Q',
229 type=COMMA_SEPARATED_LIST,
230 cls=CeleryOption,
231 help_group="Queue Options")
232 @click.option('--exclude-queues',
233 '-X',
234 type=COMMA_SEPARATED_LIST,
235 cls=CeleryOption,
236 help_group="Queue Options")
237 @click.option('--include',
238 '-I',
239 type=COMMA_SEPARATED_LIST,
240 cls=CeleryOption,
241 help_group="Queue Options")
242 @click.option('--without-gossip',
243 is_flag=True,
244 cls=CeleryOption,
245 help_group="Features")
246 @click.option('--without-mingle',
247 is_flag=True,
248 cls=CeleryOption,
249 help_group="Features")
250 @click.option('--without-heartbeat',
251 is_flag=True,
252 cls=CeleryOption,
253 help_group="Features", )
254 @click.option('--heartbeat-interval',
255 type=int,
256 cls=CeleryOption,
257 help_group="Features", )
258 @click.option('--autoscale',
259 type=AUTOSCALE,
260 cls=CeleryOption,
261 help_group="Features", )
262 @click.option('-B',
263 '--beat',
264 type=CELERY_BEAT,
265 cls=CeleryOption,
266 is_flag=True,
267 help_group="Embedded Beat Options")
268 @click.option('-s',
269 '--schedule-filename',
270 '--schedule',
271 callback=lambda ctx, _, value: value or ctx.obj.app.conf.beat_schedule_filename,
272 cls=CeleryOption,
273 help_group="Embedded Beat Options")
274 @click.option('--scheduler',
275 cls=CeleryOption,
276 help_group="Embedded Beat Options")
277 @click.pass_context
278 @handle_preload_options
279 def worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
280 loglevel=None, logfile=None, pidfile=None, statedb=None,
281 **kwargs):
282 """Start worker instance.
283
284 Examples
285 --------
286 $ celery --app=proj worker -l INFO
287 $ celery -A proj worker -l INFO -Q hipri,lopri
288 $ celery -A proj worker --concurrency=4
289 $ celery -A proj worker --concurrency=1000 -P eventlet
290 $ celery worker --autoscale=10,0
291
292 """
293 try:
294 app = ctx.obj.app
295 if ctx.args:
296 try:
297 app.config_from_cmdline(ctx.args, namespace='worker')
298 except (KeyError, ValueError) as e:
299 # TODO: Improve the error messages
300 raise click.UsageError(
301 "Unable to parse extra configuration from command line.\n"
302 f"Reason: {e}", ctx=ctx)
303 if kwargs.get('detach', False):
304 argv = ['-m', 'celery'] + sys.argv[1:]
305 if '--detach' in argv:
306 argv.remove('--detach')
307 if '-D' in argv:
308 argv.remove('-D')
309
310 return detach(sys.executable,
311 argv,
312 logfile=logfile,
313 pidfile=pidfile,
314 uid=uid, gid=gid,
315 umask=kwargs.get('umask', None),
316 workdir=kwargs.get('workdir', None),
317 app=app,
318 executable=kwargs.get('executable', None),
319 hostname=hostname)
320
321 maybe_drop_privileges(uid=uid, gid=gid)
322 worker = app.Worker(
323 hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
324 logfile=logfile, # node format handled by celery.app.log.setup
325 pidfile=node_format(pidfile, hostname),
326 statedb=node_format(statedb, hostname),
327 no_color=ctx.obj.no_color,
328 **kwargs)
329 worker.start()
330 return worker.exitcode
331 except SecurityError as e:
332 ctx.obj.error(e.args[0])
333 ctx.exit(1)
334
[end of celery/bin/worker.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/celery/bin/worker.py b/celery/bin/worker.py
--- a/celery/bin/worker.py
+++ b/celery/bin/worker.py
@@ -325,6 +325,7 @@
pidfile=node_format(pidfile, hostname),
statedb=node_format(statedb, hostname),
no_color=ctx.obj.no_color,
+ quiet=ctx.obj.quiet,
**kwargs)
worker.start()
return worker.exitcode
| {"golden_diff": "diff --git a/celery/bin/worker.py b/celery/bin/worker.py\n--- a/celery/bin/worker.py\n+++ b/celery/bin/worker.py\n@@ -325,6 +325,7 @@\n pidfile=node_format(pidfile, hostname),\n statedb=node_format(statedb, hostname),\n no_color=ctx.obj.no_color,\n+ quiet=ctx.obj.quiet,\n **kwargs)\n worker.start()\n return worker.exitcode\n", "issue": "celery worker -q --quiet seems not working with 4.4.7\nRunning celery 4.4.7 [not woking]\r\nRunning celery 4.4.6 [ok]\r\nRunning celery 4.4.1 [ok]\r\n\r\n\r\ncommands :\r\n\r\n> celery -A proj worker --loglevel=info -P eventlet --quiet\r\n> celery -A proj worker --loglevel=info -P eventlet -q\r\n\n", "before_files": [{"content": "\"\"\"Program used to start a Celery worker instance.\"\"\"\n\nimport os\nimport sys\n\nimport click\nfrom click import ParamType\nfrom click.types import StringParamType\n\nfrom celery import concurrency\nfrom celery.bin.base import (COMMA_SEPARATED_LIST, LOG_LEVEL,\n CeleryDaemonCommand, CeleryOption,\n handle_preload_options)\nfrom celery.exceptions import SecurityError\nfrom celery.platforms import (EX_FAILURE, EX_OK, detached,\n maybe_drop_privileges)\nfrom celery.utils.log import get_logger\nfrom celery.utils.nodenames import default_nodename, host_format, node_format\n\nlogger = get_logger(__name__)\n\n\nclass CeleryBeat(ParamType):\n \"\"\"Celery Beat flag.\"\"\"\n\n name = \"beat\"\n\n def convert(self, value, param, ctx):\n if ctx.obj.app.IS_WINDOWS and value:\n self.fail('-B option does not work on Windows. '\n 'Please run celery beat as a separate service.')\n\n return value\n\n\nclass WorkersPool(click.Choice):\n \"\"\"Workers pool option.\"\"\"\n\n name = \"pool\"\n\n def __init__(self):\n \"\"\"Initialize the workers pool option with the relevant choices.\"\"\"\n super().__init__(('prefork', 'eventlet', 'gevent', 'solo'))\n\n def convert(self, value, param, ctx):\n # Pools like eventlet/gevent needs to patch libs as early\n # as possible.\n return concurrency.get_implementation(\n value) or ctx.obj.app.conf.worker_pool\n\n\nclass Hostname(StringParamType):\n \"\"\"Hostname option.\"\"\"\n\n name = \"hostname\"\n\n def convert(self, value, param, ctx):\n return host_format(default_nodename(value))\n\n\nclass Autoscale(ParamType):\n \"\"\"Autoscaling parameter.\"\"\"\n\n name = \"<min workers>, <max workers>\"\n\n def convert(self, value, param, ctx):\n value = value.split(',')\n\n if len(value) > 2:\n self.fail(\"Expected two comma separated integers or one integer.\"\n f\"Got {len(value)} instead.\")\n\n if len(value) == 1:\n try:\n value = (int(value[0]), 0)\n except ValueError:\n self.fail(f\"Expected an integer. 
Got {value} instead.\")\n\n try:\n return tuple(reversed(sorted(map(int, value))))\n except ValueError:\n self.fail(\"Expected two comma separated integers.\"\n f\"Got {value.join(',')} instead.\")\n\n\nCELERY_BEAT = CeleryBeat()\nWORKERS_POOL = WorkersPool()\nHOSTNAME = Hostname()\nAUTOSCALE = Autoscale()\n\nC_FAKEFORK = os.environ.get('C_FAKEFORK')\n\n\ndef detach(path, argv, logfile=None, pidfile=None, uid=None,\n gid=None, umask=None, workdir=None, fake=False, app=None,\n executable=None, hostname=None):\n \"\"\"Detach program by argv.\"\"\"\n fake = 1 if C_FAKEFORK else fake\n # `detached()` will attempt to touch the logfile to confirm that error\n # messages won't be lost after detaching stdout/err, but this means we need\n # to pre-format it rather than relying on `setup_logging_subsystem()` like\n # we can elsewhere.\n logfile = node_format(logfile, hostname)\n with detached(logfile, pidfile, uid, gid, umask, workdir, fake,\n after_forkers=False):\n try:\n if executable is not None:\n path = executable\n os.execv(path, [path] + argv)\n return EX_OK\n except Exception: # pylint: disable=broad-except\n if app is None:\n from celery import current_app\n app = current_app\n app.log.setup_logging_subsystem(\n 'ERROR', logfile, hostname=hostname)\n logger.critical(\"Can't exec %r\", ' '.join([path] + argv),\n exc_info=True)\n return EX_FAILURE\n\n\[email protected](cls=CeleryDaemonCommand,\n context_settings={'allow_extra_args': True})\[email protected]('-n',\n '--hostname',\n default=host_format(default_nodename(None)),\n cls=CeleryOption,\n type=HOSTNAME,\n help_group=\"Worker Options\",\n help=\"Set custom hostname (e.g., 'w1@%%h'). \"\n \"Expands: %%h (hostname), %%n (name) and %%d, (domain).\")\[email protected]('-D',\n '--detach',\n cls=CeleryOption,\n is_flag=True,\n default=False,\n help_group=\"Worker Options\",\n help=\"Start worker as a background process.\")\[email protected]('-S',\n '--statedb',\n cls=CeleryOption,\n type=click.Path(),\n callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_state_db,\n help_group=\"Worker Options\",\n help=\"Path to the state database. The extension '.db' may be \"\n \"appended to the filename.\")\[email protected]('-l',\n '--loglevel',\n default='WARNING',\n cls=CeleryOption,\n type=LOG_LEVEL,\n help_group=\"Worker Options\",\n help=\"Logging level.\")\[email protected]('optimization',\n '-O',\n default='default',\n cls=CeleryOption,\n type=click.Choice(('default', 'fair')),\n help_group=\"Worker Options\",\n help=\"Apply optimization profile.\")\[email protected]('--prefetch-multiplier',\n type=int,\n metavar=\"<prefetch multiplier>\",\n callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_prefetch_multiplier,\n cls=CeleryOption,\n help_group=\"Worker Options\",\n help=\"Set custom prefetch multiplier value\"\n \"for this worker instance.\")\[email protected]('-c',\n '--concurrency',\n type=int,\n metavar=\"<concurrency>\",\n callback=lambda ctx, _, value: value or ctx.obj.app.conf.worker_concurrency,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Number of child processes processing the queue. 
\"\n \"The default is the number of CPUs available\"\n \"on your system.\")\[email protected]('-P',\n '--pool',\n default='prefork',\n type=WORKERS_POOL,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Pool implementation.\")\[email protected]('-E',\n '--task-events',\n '--events',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Send task-related events that can be captured by monitors\"\n \" like celery events, celerymon, and others.\")\[email protected]('--time-limit',\n type=float,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Enables a hard time limit \"\n \"(in seconds int/float) for tasks.\")\[email protected]('--soft-time-limit',\n type=float,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Enables a soft time limit \"\n \"(in seconds int/float) for tasks.\")\[email protected]('--max-tasks-per-child',\n type=int,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Maximum number of tasks a pool worker can execute before \"\n \"it's terminated and replaced by a new worker.\")\[email protected]('--max-memory-per-child',\n type=int,\n cls=CeleryOption,\n help_group=\"Pool Options\",\n help=\"Maximum amount of resident memory, in KiB, that may be \"\n \"consumed by a child process before it will be replaced \"\n \"by a new one. If a single task causes a child process \"\n \"to exceed this limit, the task will be completed and \"\n \"the child process will be replaced afterwards.\\n\"\n \"Default: no limit.\")\[email protected]('--purge',\n '--discard',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--queues',\n '-Q',\n type=COMMA_SEPARATED_LIST,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--exclude-queues',\n '-X',\n type=COMMA_SEPARATED_LIST,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--include',\n '-I',\n type=COMMA_SEPARATED_LIST,\n cls=CeleryOption,\n help_group=\"Queue Options\")\[email protected]('--without-gossip',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Features\")\[email protected]('--without-mingle',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Features\")\[email protected]('--without-heartbeat',\n is_flag=True,\n cls=CeleryOption,\n help_group=\"Features\", )\[email protected]('--heartbeat-interval',\n type=int,\n cls=CeleryOption,\n help_group=\"Features\", )\[email protected]('--autoscale',\n type=AUTOSCALE,\n cls=CeleryOption,\n help_group=\"Features\", )\[email protected]('-B',\n '--beat',\n type=CELERY_BEAT,\n cls=CeleryOption,\n is_flag=True,\n help_group=\"Embedded Beat Options\")\[email protected]('-s',\n '--schedule-filename',\n '--schedule',\n callback=lambda ctx, _, value: value or ctx.obj.app.conf.beat_schedule_filename,\n cls=CeleryOption,\n help_group=\"Embedded Beat Options\")\[email protected]('--scheduler',\n cls=CeleryOption,\n help_group=\"Embedded Beat Options\")\[email protected]_context\n@handle_preload_options\ndef worker(ctx, hostname=None, pool_cls=None, app=None, uid=None, gid=None,\n loglevel=None, logfile=None, pidfile=None, statedb=None,\n **kwargs):\n \"\"\"Start worker instance.\n\n Examples\n --------\n $ celery --app=proj worker -l INFO\n $ celery -A proj worker -l INFO -Q hipri,lopri\n $ celery -A proj worker --concurrency=4\n $ celery -A proj worker --concurrency=1000 -P eventlet\n $ celery worker --autoscale=10,0\n\n \"\"\"\n try:\n app = ctx.obj.app\n if ctx.args:\n try:\n app.config_from_cmdline(ctx.args, namespace='worker')\n except (KeyError, ValueError) 
as e:\n # TODO: Improve the error messages\n raise click.UsageError(\n \"Unable to parse extra configuration from command line.\\n\"\n f\"Reason: {e}\", ctx=ctx)\n if kwargs.get('detach', False):\n argv = ['-m', 'celery'] + sys.argv[1:]\n if '--detach' in argv:\n argv.remove('--detach')\n if '-D' in argv:\n argv.remove('-D')\n\n return detach(sys.executable,\n argv,\n logfile=logfile,\n pidfile=pidfile,\n uid=uid, gid=gid,\n umask=kwargs.get('umask', None),\n workdir=kwargs.get('workdir', None),\n app=app,\n executable=kwargs.get('executable', None),\n hostname=hostname)\n\n maybe_drop_privileges(uid=uid, gid=gid)\n worker = app.Worker(\n hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,\n logfile=logfile, # node format handled by celery.app.log.setup\n pidfile=node_format(pidfile, hostname),\n statedb=node_format(statedb, hostname),\n no_color=ctx.obj.no_color,\n **kwargs)\n worker.start()\n return worker.exitcode\n except SecurityError as e:\n ctx.obj.error(e.args[0])\n ctx.exit(1)\n", "path": "celery/bin/worker.py"}]} | 4,056 | 107 |
gh_patches_debug_9353 | rasdani/github-patches | git_diff | encode__uvicorn-286 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
NameError: name 'get_logger' is not defined
Happens only when running in debug mode.
```
$ uvicorn app:App --debug
Traceback (most recent call last):
File "/home/jamie.chang/.local/bin/uvicorn", line 11, in <module>
sys.exit(main())
File "/home/jamie.chang/.local/lib/python3.5/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/home/jamie.chang/.local/lib/python3.5/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/home/jamie.chang/.local/lib/python3.5/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/home/jamie.chang/.local/lib/python3.5/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/home/jamie.chang/.local/lib/python3.5/site-packages/uvicorn/main.py", line 166, in main
logger = get_logger(log_level)
NameError: name 'get_logger' is not defined```
</issue>
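The traceback points at the `--debug` branch of `main()` in the listing below: it calls `get_logger(log_level)`, but nothing named `get_logger` is imported or defined in `uvicorn/main.py` (compare the import block at the top of the file), hence the `NameError`. Purely for illustration, a standalone helper of that shape could look like the sketch below; this is hypothetical, not uvicorn's actual implementation, and the `LOG_LEVELS` mapping is redefined locally on the assumption that it translates level names into `logging` constants:

```python
import logging

# Hypothetical stand-in for uvicorn.config.LOG_LEVELS
LOG_LEVELS = {
    "critical": logging.CRITICAL,
    "error": logging.ERROR,
    "warning": logging.WARNING,
    "info": logging.INFO,
    "debug": logging.DEBUG,
}

def get_logger(log_level: str) -> logging.Logger:
    logging.basicConfig(format="%(levelname)s: %(message)s")
    logger = logging.getLogger("uvicorn")
    logger.setLevel(LOG_LEVELS[log_level])
    return logger
```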
<code>
[start of uvicorn/main.py]
1 from uvicorn.config import Config, LOG_LEVELS, HTTP_PROTOCOLS, WS_PROTOCOLS, LOOP_SETUPS
2 from uvicorn.global_state import GlobalState
3 from uvicorn.lifespan import Lifespan
4 from uvicorn.reloaders.statreload import StatReload
5 import asyncio
6 import click
7 import signal
8 import os
9 import logging
10 import socket
11 import sys
12 import time
13 import multiprocessing
14
15
16 LEVEL_CHOICES = click.Choice(LOG_LEVELS.keys())
17 HTTP_CHOICES = click.Choice(HTTP_PROTOCOLS.keys())
18 WS_CHOICES = click.Choice(WS_PROTOCOLS.keys())
19 LOOP_CHOICES = click.Choice(LOOP_SETUPS.keys())
20
21 HANDLED_SIGNALS = (
22 signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.
23 signal.SIGTERM, # Unix signal 15. Sent by `kill <pid>`.
24 )
25
26
27 @click.command()
28 @click.argument("app")
29 @click.option(
30 "--host",
31 type=str,
32 default="127.0.0.1",
33 help="Bind socket to this host.",
34 show_default=True,
35 )
36 @click.option(
37 "--port",
38 type=int,
39 default=8000,
40 help="Bind socket to this port.",
41 show_default=True,
42 )
43 @click.option("--uds", type=str, default=None, help="Bind to a UNIX domain socket.")
44 @click.option(
45 "--fd", type=int, default=None, help="Bind to socket from this file descriptor."
46 )
47 @click.option(
48 "--loop",
49 type=LOOP_CHOICES,
50 default="auto",
51 help="Event loop implementation.",
52 show_default=True,
53 )
54 @click.option(
55 "--http",
56 type=HTTP_CHOICES,
57 default="auto",
58 help="HTTP protocol implementation.",
59 show_default=True,
60 )
61 @click.option(
62 "--ws",
63 type=WS_CHOICES,
64 default="auto",
65 help="WebSocket protocol implementation.",
66 show_default=True,
67 )
68 @click.option(
69 "--wsgi",
70 is_flag=True,
71 default=False,
72 help="Use WSGI as the application interface, instead of ASGI.",
73 )
74 @click.option("--debug", is_flag=True, default=False, help="Enable debug mode.")
75 @click.option(
76 "--log-level",
77 type=LEVEL_CHOICES,
78 default="info",
79 help="Log level.",
80 show_default=True,
81 )
82 @click.option(
83 "--no-access-log", is_flag=True, default=False, help="Disable access log."
84 )
85 @click.option(
86 "--proxy-headers",
87 is_flag=True,
88 default=False,
89 help="Use X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to populate remote address info.",
90 )
91 @click.option(
92 "--root-path",
93 type=str,
94 default="",
95 help="Set the ASGI 'root_path' for applications submounted below a given URL path.",
96 )
97 @click.option(
98 "--limit-concurrency",
99 type=int,
100 default=None,
101 help="Maximum number of concurrent connections or tasks to allow, before issuing HTTP 503 responses.",
102 )
103 @click.option(
104 "--limit-max-requests",
105 type=int,
106 default=None,
107 help="Maximum number of requests to service before terminating the process.",
108 )
109 @click.option(
110 "--timeout-keep-alive",
111 type=int,
112 default=5,
113 help="Close Keep-Alive connections if no new data is received within this timeout.",
114 show_default=True,
115 )
116 @click.option(
117 "--disable-lifespan",
118 is_flag=True,
119 default=False,
120 help="Disable lifespan events (such as startup and shutdown) within an ASGI application.",
121 )
122 def main(
123 app,
124 host: str,
125 port: int,
126 uds: str,
127 fd: int,
128 loop: str,
129 http: str,
130 ws: str,
131 wsgi: bool,
132 debug: bool,
133 log_level: str,
134 no_access_log: bool,
135 proxy_headers: bool,
136 root_path: str,
137 limit_concurrency: int,
138 limit_max_requests: int,
139 timeout_keep_alive: int,
140 disable_lifespan: bool,
141 ):
142 sys.path.insert(0, ".")
143
144 kwargs = {
145 "app": app,
146 "host": host,
147 "port": port,
148 "uds": uds,
149 "fd": fd,
150 "loop": loop,
151 "http": http,
152 "ws": ws,
153 "log_level": log_level,
154 "access_log": not no_access_log,
155 "wsgi": wsgi,
156 "debug": debug,
157 "proxy_headers": proxy_headers,
158 "root_path": root_path,
159 "limit_concurrency": limit_concurrency,
160 "limit_max_requests": limit_max_requests,
161 "timeout_keep_alive": timeout_keep_alive,
162 "disable_lifespan": disable_lifespan,
163 }
164
165 if debug:
166 logger = get_logger(log_level)
167 reloader = StatReload(logger)
168 reloader.run(run, kwargs)
169 else:
170 run(**kwargs)
171
172
173 def run(**kwargs):
174 if 'global_state' in kwargs:
175 global_state = kwargs.pop('global_state')
176 else:
177 global_state = GlobalState()
178
179 config = Config(**kwargs)
180
181 server = Server(config=config, global_state=global_state)
182 server.run()
183
184
185 class Server:
186 def __init__(self, config, global_state):
187 self.config = config
188 self.global_state = global_state
189
190 self.app = config.app
191 self.host = config.host
192 self.port = config.port
193 self.uds = config.uds
194 self.sock = config.sock
195 self.logger = config.logger
196 self.loop = config.loop
197 self.limit_max_requests = config.limit_max_requests
198 self.disable_lifespan = config.disable_lifespan
199 self.on_tick = config.http_protocol_class.tick
200 self.install_signal_handlers = config.install_signal_handlers
201 self.should_exit = False
202 self.force_exit = False
203 self.pid = os.getpid()
204
205 def create_protocol():
206 return config.http_protocol_class(
207 config=config,
208 global_state=global_state
209 )
210
211 self.create_protocol = create_protocol
212
213 def set_signal_handlers(self):
214 if not self.install_signal_handlers:
215 return
216
217 try:
218 for sig in HANDLED_SIGNALS:
219 self.loop.add_signal_handler(sig, self.handle_exit, sig, None)
220 except NotImplementedError as exc:
221 # Windows
222 for sig in HANDLED_SIGNALS:
223 signal.signal(sig, self.handle_exit)
224
225 def handle_exit(self, sig, frame):
226 if self.should_exit:
227 self.force_exit = True
228 else:
229 self.should_exit = True
230
231 def run(self):
232 self.logger.info("Started server process [{}]".format(self.pid))
233 self.set_signal_handlers()
234 if not self.disable_lifespan:
235 self.lifespan = Lifespan(self.app, self.logger)
236 if self.lifespan.is_enabled:
237 self.logger.info("Waiting for application startup.")
238 self.loop.create_task(self.lifespan.run())
239 self.loop.run_until_complete(self.lifespan.wait_startup())
240 if self.lifespan.error_occured:
241 self.logger.error("Application startup failed. Exiting.")
242 return
243 else:
244 self.logger.debug(
245 "Lifespan protocol is not recognized by the application"
246 )
247 self.loop.run_until_complete(self.create_server())
248 self.loop.create_task(self.tick())
249 self.global_state.started.set()
250 self.loop.run_forever()
251
252 async def create_server(self):
253 if self.sock is not None:
254 # Use an existing socket.
255 self.server = await self.loop.create_server(
256 self.create_protocol, sock=self.sock
257 )
258 message = "Uvicorn running on socket %s (Press CTRL+C to quit)"
259 self.logger.info(message % str(self.sock.getsockname()))
260
261 elif self.uds is not None:
262 # Create a socket using UNIX domain socket.
263 self.server = await self.loop.create_unix_server(
264 self.create_protocol, path=self.uds
265 )
266 message = "Uvicorn running on unix socket %s (Press CTRL+C to quit)"
267 self.logger.info(message % self.uds)
268
269 else:
270 # Standard case. Create a socket from a host/port pair.
271 self.server = await self.loop.create_server(
272 self.create_protocol, host=self.host, port=self.port
273 )
274 message = "Uvicorn running on http://%s:%d (Press CTRL+C to quit)"
275 self.logger.info(message % (self.host, self.port))
276
277 async def tick(self):
278 should_limit_requests = self.limit_max_requests is not None
279
280 while not self.should_exit:
281 if (
282 should_limit_requests
283 and self.global_state.total_requests >= self.limit_max_requests
284 ):
285 break
286 self.on_tick()
287 await asyncio.sleep(1)
288
289 self.logger.info("Stopping server process [{}]".format(self.pid))
290 self.server.close()
291 await self.server.wait_closed()
292 for connection in list(self.global_state.connections):
293 connection.shutdown()
294
295 await asyncio.sleep(0.1)
296 if self.global_state.connections and not self.force_exit:
297 self.logger.info("Waiting for connections to close. (Press CTRL+C to force quit)")
298 while self.global_state.connections and not self.force_exit:
299 await asyncio.sleep(0.1)
300 if self.global_state.tasks and not self.force_exit:
301 self.logger.info("Waiting for background tasks to complete. (Press CTRL+C to force quit)")
302 while self.global_state.tasks and not self.force_exit:
303 await asyncio.sleep(0.1)
304
305 if not self.disable_lifespan and self.lifespan.is_enabled and not self.force_exit:
306 self.logger.info("Waiting for application shutdown.")
307 await self.lifespan.wait_shutdown()
308
309 if self.force_exit:
310 self.logger.info("Forced quit.")
311
312 self.loop.stop()
313
314
315 if __name__ == "__main__":
316 main()
317
[end of uvicorn/main.py]
[start of uvicorn/__init__.py]
1 from uvicorn.main import main, run
2
3 __version__ = "0.3.29"
4 __all__ = ["main", "run"]
5
[end of uvicorn/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py
--- a/uvicorn/__init__.py
+++ b/uvicorn/__init__.py
@@ -1,4 +1,4 @@
from uvicorn.main import main, run
-__version__ = "0.3.29"
+__version__ = "0.3.30"
__all__ = ["main", "run"]
diff --git a/uvicorn/main.py b/uvicorn/main.py
--- a/uvicorn/main.py
+++ b/uvicorn/main.py
@@ -1,4 +1,4 @@
-from uvicorn.config import Config, LOG_LEVELS, HTTP_PROTOCOLS, WS_PROTOCOLS, LOOP_SETUPS
+from uvicorn.config import get_logger, Config, LOG_LEVELS, HTTP_PROTOCOLS, WS_PROTOCOLS, LOOP_SETUPS
from uvicorn.global_state import GlobalState
from uvicorn.lifespan import Lifespan
from uvicorn.reloaders.statreload import StatReload
| {"golden_diff": "diff --git a/uvicorn/__init__.py b/uvicorn/__init__.py\n--- a/uvicorn/__init__.py\n+++ b/uvicorn/__init__.py\n@@ -1,4 +1,4 @@\n from uvicorn.main import main, run\n \n-__version__ = \"0.3.29\"\n+__version__ = \"0.3.30\"\n __all__ = [\"main\", \"run\"]\ndiff --git a/uvicorn/main.py b/uvicorn/main.py\n--- a/uvicorn/main.py\n+++ b/uvicorn/main.py\n@@ -1,4 +1,4 @@\n-from uvicorn.config import Config, LOG_LEVELS, HTTP_PROTOCOLS, WS_PROTOCOLS, LOOP_SETUPS\n+from uvicorn.config import get_logger, Config, LOG_LEVELS, HTTP_PROTOCOLS, WS_PROTOCOLS, LOOP_SETUPS\n from uvicorn.global_state import GlobalState\n from uvicorn.lifespan import Lifespan\n from uvicorn.reloaders.statreload import StatReload\n", "issue": "NameError: name 'get_logger' is not defined\nHappens only when running in debug mode.\r\n```\r\n$ uvicorn app:App --debug\r\nTraceback (most recent call last):\r\n File \"/home/jamie.chang/.local/bin/uvicorn\", line 11, in <module>\r\n sys.exit(main())\r\n File \"/home/jamie.chang/.local/lib/python3.5/site-packages/click/core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/home/jamie.chang/.local/lib/python3.5/site-packages/click/core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"/home/jamie.chang/.local/lib/python3.5/site-packages/click/core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/home/jamie.chang/.local/lib/python3.5/site-packages/click/core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/home/jamie.chang/.local/lib/python3.5/site-packages/uvicorn/main.py\", line 166, in main\r\n logger = get_logger(log_level)\r\nNameError: name 'get_logger' is not defined```\n", "before_files": [{"content": "from uvicorn.config import Config, LOG_LEVELS, HTTP_PROTOCOLS, WS_PROTOCOLS, LOOP_SETUPS\nfrom uvicorn.global_state import GlobalState\nfrom uvicorn.lifespan import Lifespan\nfrom uvicorn.reloaders.statreload import StatReload\nimport asyncio\nimport click\nimport signal\nimport os\nimport logging\nimport socket\nimport sys\nimport time\nimport multiprocessing\n\n\nLEVEL_CHOICES = click.Choice(LOG_LEVELS.keys())\nHTTP_CHOICES = click.Choice(HTTP_PROTOCOLS.keys())\nWS_CHOICES = click.Choice(WS_PROTOCOLS.keys())\nLOOP_CHOICES = click.Choice(LOOP_SETUPS.keys())\n\nHANDLED_SIGNALS = (\n signal.SIGINT, # Unix signal 2. Sent by Ctrl+C.\n signal.SIGTERM, # Unix signal 15. 
Sent by `kill <pid>`.\n)\n\n\[email protected]()\[email protected](\"app\")\[email protected](\n \"--host\",\n type=str,\n default=\"127.0.0.1\",\n help=\"Bind socket to this host.\",\n show_default=True,\n)\[email protected](\n \"--port\",\n type=int,\n default=8000,\n help=\"Bind socket to this port.\",\n show_default=True,\n)\[email protected](\"--uds\", type=str, default=None, help=\"Bind to a UNIX domain socket.\")\[email protected](\n \"--fd\", type=int, default=None, help=\"Bind to socket from this file descriptor.\"\n)\[email protected](\n \"--loop\",\n type=LOOP_CHOICES,\n default=\"auto\",\n help=\"Event loop implementation.\",\n show_default=True,\n)\[email protected](\n \"--http\",\n type=HTTP_CHOICES,\n default=\"auto\",\n help=\"HTTP protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--ws\",\n type=WS_CHOICES,\n default=\"auto\",\n help=\"WebSocket protocol implementation.\",\n show_default=True,\n)\[email protected](\n \"--wsgi\",\n is_flag=True,\n default=False,\n help=\"Use WSGI as the application interface, instead of ASGI.\",\n)\[email protected](\"--debug\", is_flag=True, default=False, help=\"Enable debug mode.\")\[email protected](\n \"--log-level\",\n type=LEVEL_CHOICES,\n default=\"info\",\n help=\"Log level.\",\n show_default=True,\n)\[email protected](\n \"--no-access-log\", is_flag=True, default=False, help=\"Disable access log.\"\n)\[email protected](\n \"--proxy-headers\",\n is_flag=True,\n default=False,\n help=\"Use X-Forwarded-Proto, X-Forwarded-For, X-Forwarded-Port to populate remote address info.\",\n)\[email protected](\n \"--root-path\",\n type=str,\n default=\"\",\n help=\"Set the ASGI 'root_path' for applications submounted below a given URL path.\",\n)\[email protected](\n \"--limit-concurrency\",\n type=int,\n default=None,\n help=\"Maximum number of concurrent connections or tasks to allow, before issuing HTTP 503 responses.\",\n)\[email protected](\n \"--limit-max-requests\",\n type=int,\n default=None,\n help=\"Maximum number of requests to service before terminating the process.\",\n)\[email protected](\n \"--timeout-keep-alive\",\n type=int,\n default=5,\n help=\"Close Keep-Alive connections if no new data is received within this timeout.\",\n show_default=True,\n)\[email protected](\n \"--disable-lifespan\",\n is_flag=True,\n default=False,\n help=\"Disable lifespan events (such as startup and shutdown) within an ASGI application.\",\n)\ndef main(\n app,\n host: str,\n port: int,\n uds: str,\n fd: int,\n loop: str,\n http: str,\n ws: str,\n wsgi: bool,\n debug: bool,\n log_level: str,\n no_access_log: bool,\n proxy_headers: bool,\n root_path: str,\n limit_concurrency: int,\n limit_max_requests: int,\n timeout_keep_alive: int,\n disable_lifespan: bool,\n):\n sys.path.insert(0, \".\")\n\n kwargs = {\n \"app\": app,\n \"host\": host,\n \"port\": port,\n \"uds\": uds,\n \"fd\": fd,\n \"loop\": loop,\n \"http\": http,\n \"ws\": ws,\n \"log_level\": log_level,\n \"access_log\": not no_access_log,\n \"wsgi\": wsgi,\n \"debug\": debug,\n \"proxy_headers\": proxy_headers,\n \"root_path\": root_path,\n \"limit_concurrency\": limit_concurrency,\n \"limit_max_requests\": limit_max_requests,\n \"timeout_keep_alive\": timeout_keep_alive,\n \"disable_lifespan\": disable_lifespan,\n }\n\n if debug:\n logger = get_logger(log_level)\n reloader = StatReload(logger)\n reloader.run(run, kwargs)\n else:\n run(**kwargs)\n\n\ndef run(**kwargs):\n if 'global_state' in kwargs:\n global_state = kwargs.pop('global_state')\n else:\n global_state = 
GlobalState()\n\n config = Config(**kwargs)\n\n server = Server(config=config, global_state=global_state)\n server.run()\n\n\nclass Server:\n def __init__(self, config, global_state):\n self.config = config\n self.global_state = global_state\n\n self.app = config.app\n self.host = config.host\n self.port = config.port\n self.uds = config.uds\n self.sock = config.sock\n self.logger = config.logger\n self.loop = config.loop\n self.limit_max_requests = config.limit_max_requests\n self.disable_lifespan = config.disable_lifespan\n self.on_tick = config.http_protocol_class.tick\n self.install_signal_handlers = config.install_signal_handlers\n self.should_exit = False\n self.force_exit = False\n self.pid = os.getpid()\n\n def create_protocol():\n return config.http_protocol_class(\n config=config,\n global_state=global_state\n )\n\n self.create_protocol = create_protocol\n\n def set_signal_handlers(self):\n if not self.install_signal_handlers:\n return\n\n try:\n for sig in HANDLED_SIGNALS:\n self.loop.add_signal_handler(sig, self.handle_exit, sig, None)\n except NotImplementedError as exc:\n # Windows\n for sig in HANDLED_SIGNALS:\n signal.signal(sig, self.handle_exit)\n\n def handle_exit(self, sig, frame):\n if self.should_exit:\n self.force_exit = True\n else:\n self.should_exit = True\n\n def run(self):\n self.logger.info(\"Started server process [{}]\".format(self.pid))\n self.set_signal_handlers()\n if not self.disable_lifespan:\n self.lifespan = Lifespan(self.app, self.logger)\n if self.lifespan.is_enabled:\n self.logger.info(\"Waiting for application startup.\")\n self.loop.create_task(self.lifespan.run())\n self.loop.run_until_complete(self.lifespan.wait_startup())\n if self.lifespan.error_occured:\n self.logger.error(\"Application startup failed. Exiting.\")\n return\n else:\n self.logger.debug(\n \"Lifespan protocol is not recognized by the application\"\n )\n self.loop.run_until_complete(self.create_server())\n self.loop.create_task(self.tick())\n self.global_state.started.set()\n self.loop.run_forever()\n\n async def create_server(self):\n if self.sock is not None:\n # Use an existing socket.\n self.server = await self.loop.create_server(\n self.create_protocol, sock=self.sock\n )\n message = \"Uvicorn running on socket %s (Press CTRL+C to quit)\"\n self.logger.info(message % str(self.sock.getsockname()))\n\n elif self.uds is not None:\n # Create a socket using UNIX domain socket.\n self.server = await self.loop.create_unix_server(\n self.create_protocol, path=self.uds\n )\n message = \"Uvicorn running on unix socket %s (Press CTRL+C to quit)\"\n self.logger.info(message % self.uds)\n\n else:\n # Standard case. 
Create a socket from a host/port pair.\n self.server = await self.loop.create_server(\n self.create_protocol, host=self.host, port=self.port\n )\n message = \"Uvicorn running on http://%s:%d (Press CTRL+C to quit)\"\n self.logger.info(message % (self.host, self.port))\n\n async def tick(self):\n should_limit_requests = self.limit_max_requests is not None\n\n while not self.should_exit:\n if (\n should_limit_requests\n and self.global_state.total_requests >= self.limit_max_requests\n ):\n break\n self.on_tick()\n await asyncio.sleep(1)\n\n self.logger.info(\"Stopping server process [{}]\".format(self.pid))\n self.server.close()\n await self.server.wait_closed()\n for connection in list(self.global_state.connections):\n connection.shutdown()\n\n await asyncio.sleep(0.1)\n if self.global_state.connections and not self.force_exit:\n self.logger.info(\"Waiting for connections to close. (Press CTRL+C to force quit)\")\n while self.global_state.connections and not self.force_exit:\n await asyncio.sleep(0.1)\n if self.global_state.tasks and not self.force_exit:\n self.logger.info(\"Waiting for background tasks to complete. (Press CTRL+C to force quit)\")\n while self.global_state.tasks and not self.force_exit:\n await asyncio.sleep(0.1)\n\n if not self.disable_lifespan and self.lifespan.is_enabled and not self.force_exit:\n self.logger.info(\"Waiting for application shutdown.\")\n await self.lifespan.wait_shutdown()\n\n if self.force_exit:\n self.logger.info(\"Forced quit.\")\n\n self.loop.stop()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "uvicorn/main.py"}, {"content": "from uvicorn.main import main, run\n\n__version__ = \"0.3.29\"\n__all__ = [\"main\", \"run\"]\n", "path": "uvicorn/__init__.py"}]} | 3,844 | 219 |
gh_patches_debug_35997 | rasdani/github-patches | git_diff | xonsh__xonsh-890 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Spaces in the Python installation path cause the xonsh.exe launcher to break on Windows
Steps to repeat:
Have Python installed on Windows in a directory with a space in the name. This can be Program Files if installed for all users, or your user directory if the username has a space in it (NEVER DO THIS, except that people do).
Install xonsh in the normal way using `python setup.py install` (or via pip).
Run scripts/xonsh.exe.
Expected:
xonsh to run
Actual:
Message "failed to create process."
This is clearly a known issue with pip/setuptools:
https://github.com/pypa/setuptools/issues/398
https://github.com/pypa/pip/issues/2783
I may try and pursue getting it fixed at source if I have some time to understand what's actually going on (setuptools has always been a black hole for me). In the meantime, the second of those two issue discussions contains some workaround code that might be appropriate to get this issue killed off. Happy to submit a PR.
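For context (not part of the original issue text), the workaround discussed in the second pip thread, and essentially what the golden diff shown further below does, boils down to quoting the shebang interpreter path when setuptools writes the console scripts. A minimal sketch, assuming setuptools' `install_scripts` command is the right hook; the class name is illustrative:

```python
from setuptools.command.install_scripts import install_scripts


class QuotedShebangInstallScripts(install_scripts):
    """Quote shebang paths containing spaces so the generated .exe launcher still works."""

    def write_script(self, script_name, contents, mode="t", *ignored):
        shebang = str(contents.splitlines()[0])
        if shebang.startswith("#!") and " " in shebang[2:].strip() and '"' not in shebang:
            contents = contents.replace(shebang, '#!"%s"' % shebang[2:].strip())
        super().write_script(script_name, contents, mode, *ignored)
```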
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2 # -*- coding: ascii -*-
3 """The xonsh installer."""
4 # Note: Do not embed any non-ASCII characters in this file until pip has been
5 # fixed. See https://github.com/scopatz/xonsh/issues/487.
6 from __future__ import print_function, unicode_literals
7 import os
8 import sys
9 import json
10
11 try:
12 from tempfile import TemporaryDirectory
13 except ImportError:
14 pass
15
16 try:
17 from setuptools import setup
18 from setuptools.command.sdist import sdist
19 from setuptools.command.install import install
20 from setuptools.command.develop import develop
21 HAVE_SETUPTOOLS = True
22 except ImportError:
23 from distutils.core import setup
24 from distutils.command.sdist import sdist as sdist
25 from distutils.command.install import install as install
26 HAVE_SETUPTOOLS = False
27
28 try:
29 from jupyter_client.kernelspec import KernelSpecManager
30 HAVE_JUPYTER = True
31 except ImportError:
32 HAVE_JUPYTER = False
33
34 from xonsh import __version__ as XONSH_VERSION
35
36 TABLES = ['xonsh/lexer_table.py', 'xonsh/parser_table.py']
37
38
39 def clean_tables():
40 """Remove the lexer/parser modules that are dynamically created."""
41 for f in TABLES:
42 if os.path.isfile(f):
43 os.remove(f)
44 print('Remove ' + f)
45
46
47 def build_tables():
48 """Build the lexer/parser modules."""
49 print('Building lexer and parser tables.')
50 sys.path.insert(0, os.path.dirname(__file__))
51 from xonsh.parser import Parser
52 Parser(lexer_table='lexer_table', yacc_table='parser_table',
53 outputdir='xonsh')
54 sys.path.pop(0)
55
56
57 def install_jupyter_hook(root=None):
58 """Make xonsh available as a Jupyter kernel."""
59 if not HAVE_JUPYTER:
60 print('Could not install Jupyter kernel spec, please install '
61 'Jupyter/IPython.')
62 return
63 spec = {"argv": [sys.executable, "-m", "xonsh.jupyter_kernel",
64 "-f", "{connection_file}"],
65 "display_name": "Xonsh",
66 "language": "xonsh",
67 "codemirror_mode": "shell",
68 }
69 with TemporaryDirectory() as d:
70 os.chmod(d, 0o755) # Starts off as 700, not user readable
71 if sys.platform == 'win32':
72 # Ensure that conda-build detects the hard coded prefix
73 spec['argv'][0] = spec['argv'][0].replace(os.sep, os.altsep)
74 with open(os.path.join(d, 'kernel.json'), 'w') as f:
75 json.dump(spec, f, sort_keys=True)
76 if 'CONDA_BUILD' in os.environ:
77 root = sys.prefix
78 if sys.platform == 'win32':
79 root = root.replace(os.sep, os.altsep)
80 print('Installing Jupyter kernel spec...')
81 KernelSpecManager().install_kernel_spec(
82 d, 'xonsh', user=('--user' in sys.argv), replace=True,
83 prefix=root)
84
85
86 class xinstall(install):
87 """Xonsh specialization of setuptools install class."""
88 def run(self):
89 clean_tables()
90 build_tables()
91 install_jupyter_hook(self.root if self.root else None)
92 install.run(self)
93
94
95 class xsdist(sdist):
96 """Xonsh specialization of setuptools sdist class."""
97 def make_release_tree(self, basedir, files):
98 clean_tables()
99 build_tables()
100 sdist.make_release_tree(self, basedir, files)
101
102
103 if HAVE_SETUPTOOLS:
104 class xdevelop(develop):
105 """Xonsh specialization of setuptools develop class."""
106 def run(self):
107 clean_tables()
108 build_tables()
109 develop.run(self)
110
111
112 def main():
113 """The main entry point."""
114 if sys.version_info[0] < 3:
115 sys.exit('xonsh currently requires Python 3.4+')
116 try:
117 if '--name' not in sys.argv:
118 logo_fname = os.path.join(os.path.dirname(__file__), 'logo.txt')
119 with open(logo_fname, 'rb') as f:
120 logo = f.read().decode('utf-8')
121 print(logo)
122 except UnicodeEncodeError:
123 pass
124 with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'r') as f:
125 readme = f.read()
126 skw = dict(
127 name='xonsh',
128 description='A general purpose, Python-ish shell',
129 long_description=readme,
130 license='BSD',
131 version=XONSH_VERSION,
132 author='Anthony Scopatz',
133 maintainer='Anthony Scopatz',
134 author_email='[email protected]',
135 url='https://github.com/scopatz/xonsh',
136 platforms='Cross Platform',
137 classifiers=['Programming Language :: Python :: 3'],
138 packages=['xonsh', 'xonsh.ptk', 'xonsh.parsers', 'xontrib'],
139 package_dir={'xonsh': 'xonsh', 'xontrib': 'xontrib'},
140 package_data={'xonsh': ['*.json'], 'xontrib': ['*.xsh']},
141 cmdclass={'install': xinstall, 'sdist': xsdist},
142 )
143 if HAVE_SETUPTOOLS:
144 skw['entry_points'] = {
145 'pygments.lexers': ['xonsh = xonsh.pyghooks:XonshLexer',
146 'xonshcon = xonsh.pyghooks:XonshConsoleLexer',
147 ],
148 'console_scripts': ['xonsh = xonsh.main:main'],
149 }
150 skw['cmdclass']['develop'] = xdevelop
151 else:
152 skw['scripts'] = ['scripts/xonsh'] if 'win' not in sys.platform else ['scripts/xonsh.bat'],
153
154 setup(**skw)
155
156
157 if __name__ == '__main__':
158 main()
159
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,11 +18,13 @@
from setuptools.command.sdist import sdist
from setuptools.command.install import install
from setuptools.command.develop import develop
+ from setuptools.command.install_scripts import install_scripts
HAVE_SETUPTOOLS = True
except ImportError:
from distutils.core import setup
from distutils.command.sdist import sdist as sdist
from distutils.command.install import install as install
+ from distutils.command.install_scripts import install_scripts
HAVE_SETUPTOOLS = False
try:
@@ -100,6 +102,29 @@
sdist.make_release_tree(self, basedir, files)
+#-----------------------------------------------------------------------------
+# Hack to overcome pip/setuptools problem on Win 10. See:
+# https://github.com/tomduck/pandoc-eqnos/issues/6
+# https://github.com/pypa/pip/issues/2783
+
+# Custom install_scripts command class for setup()
+class install_scripts_quoted_shebang(install_scripts):
+ """Ensure there are quotes around shebang paths with spaces."""
+ def write_script(self, script_name, contents, mode="t", *ignored):
+ shebang = str(contents.splitlines()[0])
+ if shebang.startswith('#!') and ' ' in shebang[2:].strip() \
+ and '"' not in shebang:
+ quoted_shebang = '#!"%s"' % shebang[2:].strip()
+ contents = contents.replace(shebang, quoted_shebang)
+ super().write_script(script_name, contents, mode, *ignored)
+
+# The custom install needs to be used on Windows machines
+if os.name == 'nt':
+ cmdclass = {'install': xinstall, 'sdist': xsdist, 'install_scripts': install_scripts_quoted_shebang}
+else:
+ cmdclass = {'install': xinstall, 'sdist': xsdist}
+
+
if HAVE_SETUPTOOLS:
class xdevelop(develop):
"""Xonsh specialization of setuptools develop class."""
@@ -138,7 +163,7 @@
packages=['xonsh', 'xonsh.ptk', 'xonsh.parsers', 'xontrib'],
package_dir={'xonsh': 'xonsh', 'xontrib': 'xontrib'},
package_data={'xonsh': ['*.json'], 'xontrib': ['*.xsh']},
- cmdclass={'install': xinstall, 'sdist': xsdist},
+ cmdclass=cmdclass
)
if HAVE_SETUPTOOLS:
skw['entry_points'] = {
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,11 +18,13 @@\n from setuptools.command.sdist import sdist\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n+ from setuptools.command.install_scripts import install_scripts\n HAVE_SETUPTOOLS = True\n except ImportError:\n from distutils.core import setup\n from distutils.command.sdist import sdist as sdist\n from distutils.command.install import install as install\n+ from distutils.command.install_scripts import install_scripts\n HAVE_SETUPTOOLS = False\n \n try:\n@@ -100,6 +102,29 @@\n sdist.make_release_tree(self, basedir, files)\n \n \n+#-----------------------------------------------------------------------------\n+# Hack to overcome pip/setuptools problem on Win 10. See:\n+# https://github.com/tomduck/pandoc-eqnos/issues/6\n+# https://github.com/pypa/pip/issues/2783\n+\n+# Custom install_scripts command class for setup()\n+class install_scripts_quoted_shebang(install_scripts):\n+ \"\"\"Ensure there are quotes around shebang paths with spaces.\"\"\"\n+ def write_script(self, script_name, contents, mode=\"t\", *ignored):\n+ shebang = str(contents.splitlines()[0])\n+ if shebang.startswith('#!') and ' ' in shebang[2:].strip() \\\n+ and '\"' not in shebang:\n+ quoted_shebang = '#!\"%s\"' % shebang[2:].strip()\n+ contents = contents.replace(shebang, quoted_shebang)\n+ super().write_script(script_name, contents, mode, *ignored)\n+\n+# The custom install needs to be used on Windows machines\n+if os.name == 'nt':\n+ cmdclass = {'install': xinstall, 'sdist': xsdist, 'install_scripts': install_scripts_quoted_shebang}\n+else:\n+ cmdclass = {'install': xinstall, 'sdist': xsdist}\n+\n+\n if HAVE_SETUPTOOLS:\n class xdevelop(develop):\n \"\"\"Xonsh specialization of setuptools develop class.\"\"\"\n@@ -138,7 +163,7 @@\n packages=['xonsh', 'xonsh.ptk', 'xonsh.parsers', 'xontrib'],\n package_dir={'xonsh': 'xonsh', 'xontrib': 'xontrib'},\n package_data={'xonsh': ['*.json'], 'xontrib': ['*.xsh']},\n- cmdclass={'install': xinstall, 'sdist': xsdist},\n+ cmdclass=cmdclass\n )\n if HAVE_SETUPTOOLS:\n skw['entry_points'] = {\n", "issue": "Spaces in Python installation path causes xonsh.exe launcher to break on Windows\nSteps to repeat:\nHave Python installed on Windows, to a directory with a space in the name. This can be Program Files if installed for all users, or it can be your user directory if the username has a space in it (NEVER DO THIS, except that people do).\nInstall xonsh in the normal way using `python setup.py install` (or via pip).\nRun scripts/xonsh.exe.\n\nExpected:\nxonsh to run\n\nActual:\nMessage \"failed to create process.\"\n\nThis is clearly a known issue with pip/setuptools:\nhttps://github.com/pypa/setuptools/issues/398\nhttps://github.com/pypa/pip/issues/2783\n\nI may try and pursue getting it fixed at source if I have some time to understand what's actually going on (setuptools has always been a black hole for me). In the meantime, the second of those two issue discussions contains some workaround code that might be appropriate, to get this issue killed off. Happy to submit a PR.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: ascii -*-\n\"\"\"The xonsh installer.\"\"\"\n# Note: Do not embed any non-ASCII characters in this file until pip has been\n# fixed. 
See https://github.com/scopatz/xonsh/issues/487.\nfrom __future__ import print_function, unicode_literals\nimport os\nimport sys\nimport json\n\ntry:\n from tempfile import TemporaryDirectory\nexcept ImportError:\n pass\n\ntry:\n from setuptools import setup\n from setuptools.command.sdist import sdist\n from setuptools.command.install import install\n from setuptools.command.develop import develop\n HAVE_SETUPTOOLS = True\nexcept ImportError:\n from distutils.core import setup\n from distutils.command.sdist import sdist as sdist\n from distutils.command.install import install as install\n HAVE_SETUPTOOLS = False\n\ntry:\n from jupyter_client.kernelspec import KernelSpecManager\n HAVE_JUPYTER = True\nexcept ImportError:\n HAVE_JUPYTER = False\n\nfrom xonsh import __version__ as XONSH_VERSION\n\nTABLES = ['xonsh/lexer_table.py', 'xonsh/parser_table.py']\n\n\ndef clean_tables():\n \"\"\"Remove the lexer/parser modules that are dynamically created.\"\"\"\n for f in TABLES:\n if os.path.isfile(f):\n os.remove(f)\n print('Remove ' + f)\n\n\ndef build_tables():\n \"\"\"Build the lexer/parser modules.\"\"\"\n print('Building lexer and parser tables.')\n sys.path.insert(0, os.path.dirname(__file__))\n from xonsh.parser import Parser\n Parser(lexer_table='lexer_table', yacc_table='parser_table',\n outputdir='xonsh')\n sys.path.pop(0)\n\n\ndef install_jupyter_hook(root=None):\n \"\"\"Make xonsh available as a Jupyter kernel.\"\"\"\n if not HAVE_JUPYTER:\n print('Could not install Jupyter kernel spec, please install '\n 'Jupyter/IPython.')\n return\n spec = {\"argv\": [sys.executable, \"-m\", \"xonsh.jupyter_kernel\",\n \"-f\", \"{connection_file}\"],\n \"display_name\": \"Xonsh\",\n \"language\": \"xonsh\",\n \"codemirror_mode\": \"shell\",\n }\n with TemporaryDirectory() as d:\n os.chmod(d, 0o755) # Starts off as 700, not user readable\n if sys.platform == 'win32':\n # Ensure that conda-build detects the hard coded prefix\n spec['argv'][0] = spec['argv'][0].replace(os.sep, os.altsep)\n with open(os.path.join(d, 'kernel.json'), 'w') as f:\n json.dump(spec, f, sort_keys=True)\n if 'CONDA_BUILD' in os.environ:\n root = sys.prefix\n if sys.platform == 'win32':\n root = root.replace(os.sep, os.altsep)\n print('Installing Jupyter kernel spec...')\n KernelSpecManager().install_kernel_spec(\n d, 'xonsh', user=('--user' in sys.argv), replace=True,\n prefix=root)\n\n\nclass xinstall(install):\n \"\"\"Xonsh specialization of setuptools install class.\"\"\"\n def run(self):\n clean_tables()\n build_tables()\n install_jupyter_hook(self.root if self.root else None)\n install.run(self)\n\n\nclass xsdist(sdist):\n \"\"\"Xonsh specialization of setuptools sdist class.\"\"\"\n def make_release_tree(self, basedir, files):\n clean_tables()\n build_tables()\n sdist.make_release_tree(self, basedir, files)\n\n\nif HAVE_SETUPTOOLS:\n class xdevelop(develop):\n \"\"\"Xonsh specialization of setuptools develop class.\"\"\"\n def run(self):\n clean_tables()\n build_tables()\n develop.run(self)\n\n\ndef main():\n \"\"\"The main entry point.\"\"\"\n if sys.version_info[0] < 3:\n sys.exit('xonsh currently requires Python 3.4+')\n try:\n if '--name' not in sys.argv:\n logo_fname = os.path.join(os.path.dirname(__file__), 'logo.txt')\n with open(logo_fname, 'rb') as f:\n logo = f.read().decode('utf-8')\n print(logo)\n except UnicodeEncodeError:\n pass\n with open(os.path.join(os.path.dirname(__file__), 'README.rst'), 'r') as f:\n readme = f.read()\n skw = dict(\n name='xonsh',\n description='A general purpose, Python-ish shell',\n 
long_description=readme,\n license='BSD',\n version=XONSH_VERSION,\n author='Anthony Scopatz',\n maintainer='Anthony Scopatz',\n author_email='[email protected]',\n url='https://github.com/scopatz/xonsh',\n platforms='Cross Platform',\n classifiers=['Programming Language :: Python :: 3'],\n packages=['xonsh', 'xonsh.ptk', 'xonsh.parsers', 'xontrib'],\n package_dir={'xonsh': 'xonsh', 'xontrib': 'xontrib'},\n package_data={'xonsh': ['*.json'], 'xontrib': ['*.xsh']},\n cmdclass={'install': xinstall, 'sdist': xsdist},\n )\n if HAVE_SETUPTOOLS:\n skw['entry_points'] = {\n 'pygments.lexers': ['xonsh = xonsh.pyghooks:XonshLexer',\n 'xonshcon = xonsh.pyghooks:XonshConsoleLexer',\n ],\n 'console_scripts': ['xonsh = xonsh.main:main'],\n }\n skw['cmdclass']['develop'] = xdevelop\n else:\n skw['scripts'] = ['scripts/xonsh'] if 'win' not in sys.platform else ['scripts/xonsh.bat'],\n\n setup(**skw)\n\n\nif __name__ == '__main__':\n main()\n", "path": "setup.py"}]} | 2,411 | 596 |
gh_patches_debug_15284 | rasdani/github-patches | git_diff | ktbyers__netmiko-3215 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
tplink "Pattern not detected: '[>#]' in output." in version 4.0.0 and above
Hello!
An error appears when connecting to a TP-Link device with Netmiko 4.0.0 and above, but with 3.4.0 there is no error and the command output is displayed.
**Code:**
```
device = {
"device_type": "tplink_jetstream",
"host": host,
"username": param["ssh_user"],
"password": param["ssh_pass"],
}
with ConnectHandler(**device) as ssh:
ssh.enable()
output_before = ssh.send_command("sh system-info")
print(output_before)
```
**Error:**
```
Pattern not detected: '[>#]' in output.
Things you might try to fix this:
1. Adjust the regex pattern to better identify the terminating string. Note, in
many situations the pattern is automatically based on the network device's prompt.
2. Increase the read_timeout to a larger value.
You can also look at the Netmiko session_log or debug log for more information.
```
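For reference, a trace like the one below is ordinary Python logging output from the `netmiko` and `paramiko` loggers; a minimal sketch for capturing it (not part of the original report) is:

```python
import logging

# Emit DEBUG output from the netmiko and paramiko loggers; the default
# basicConfig format produces the "DEBUG:netmiko:..." and
# "DEBUG:paramiko.transport:..." lines shown below.
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("netmiko").setLevel(logging.DEBUG)
logging.getLogger("paramiko").setLevel(logging.DEBUG)
```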
**Debug:**
```
DEBUG:paramiko.transport:starting thread (client mode): 0x30757d50
DEBUG:paramiko.transport:Local version/idstring: SSH-2.0-paramiko_2.12.0
DEBUG:paramiko.transport:Remote version/idstring: SSH-1.99-IPSSH-6.6.0
INFO:paramiko.transport:Connected (version 1.99, client IPSSH-6.6.0)
DEBUG:paramiko.transport:=== Key exchange possibilities ===
DEBUG:paramiko.transport:kex algos: diffie-hellman-group1-sha1
DEBUG:paramiko.transport:server key: ssh-dss
DEBUG:paramiko.transport:client encrypt: aes128-cbc, aes192-cbc, aes256-cbc, blowfish-cbc, cast128-cbc, 3des-cbc
DEBUG:paramiko.transport:server encrypt: aes128-cbc, aes192-cbc, aes256-cbc, blowfish-cbc, cast128-cbc, 3des-cbc
DEBUG:paramiko.transport:client mac: hmac-sha1, hmac-sha1-96, hmac-md5, hmac-md5-96
DEBUG:paramiko.transport:server mac: hmac-sha1, hmac-sha1-96, hmac-md5, hmac-md5-96
DEBUG:paramiko.transport:client compress: none
DEBUG:paramiko.transport:server compress: none
DEBUG:paramiko.transport:client lang: <none>
DEBUG:paramiko.transport:server lang: <none>
DEBUG:paramiko.transport:kex follows: False
DEBUG:paramiko.transport:=== Key exchange agreements ===
DEBUG:paramiko.transport:Kex: diffie-hellman-group1-sha1
DEBUG:paramiko.transport:HostKey: ssh-dss
DEBUG:paramiko.transport:Cipher: aes128-cbc
DEBUG:paramiko.transport:MAC: hmac-sha1
DEBUG:paramiko.transport:Compression: none
DEBUG:paramiko.transport:=== End of kex handshake ===
DEBUG:paramiko.transport:kex engine KexGroup1 specified hash_algo <built-in function openssl_sha1>
DEBUG:paramiko.transport:Switch to new keys ...
DEBUG:paramiko.transport:Adding ssh-dss host key for 10.10.10.150: b'6e3ca009ab027terd8aa7e40d0516ef8'
DEBUG:paramiko.transport:userauth is OK
INFO:paramiko.transport:Authentication (password) successful!
DEBUG:paramiko.transport:[chan 0] Max packet in: 32768 bytes
DEBUG:paramiko.transport:[chan 0] Max packet out: 32768 bytes
DEBUG:paramiko.transport:Secsh channel 0 opened.
DEBUG:paramiko.transport:[chan 0] Sesch channel 0 request ok
DEBUG:paramiko.transport:[chan 0] Sesch channel 0 request ok
DEBUG:netmiko:write_channel: b'\r\n'
DEBUG:netmiko:read_channel:
tp3428-test>
tp3428-test>
DEBUG:netmiko:Clear buffer detects data in the channel
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
...
EBUG:netmiko:read_channel:
DEBUG:netmiko:write_channel: b'\r\n'
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
tp3428-test>
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
....
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:read_channel:
DEBUG:netmiko:write_channel: b'exit\r\n'
```
In version 3.4.0, the same code works without errors:
```
ThreadPoolExecutor-0_0 root INFO: ===> 18:53:05.075551 Connection: 10.10.10.150
System Description - JetStream 24-Port Gigabit L2+ Managed Switch with 4 10GE SFP+ Slots
System Name - tp3428-test
System Location - Hong Kong
Contact Information - www.tp-link.com
Hardware Version - TL-SG3428X 1.0
Bootloader Version - TP-LINK BOOTUTIL(v1.0.0)
System Time - 2023-02-22 18:53:16
Running Time - 0 day - 5 hour - 20 min - 12 sec
```
</issue>
<code>
[start of netmiko/tplink/tplink_jetstream.py]
1 import re
2 import time
3 from typing import Any, Optional
4
5 from cryptography.hazmat.primitives.asymmetric import dsa
6 from cryptography.hazmat.primitives.asymmetric.dsa import DSAParameterNumbers
7
8 from netmiko import log
9 from netmiko.cisco_base_connection import CiscoSSHConnection
10 from netmiko.exceptions import ReadTimeout
11
12
13 class TPLinkJetStreamBase(CiscoSSHConnection):
14 def __init__(self, **kwargs: Any) -> None:
15 # TP-Link doesn't have a way to set terminal width which breaks cmd_verify
16 if kwargs.get("global_cmd_verify") is None:
17 kwargs["global_cmd_verify"] = False
18 # TP-Link uses "\r\n" as default_enter for SSH and Telnet
19 if kwargs.get("default_enter") is None:
20 kwargs["default_enter"] = "\r\n"
21 return super().__init__(**kwargs)
22
23 def session_preparation(self) -> None:
24 """
25 Prepare the session after the connection has been established.
26 """
27 delay_factor = self.select_delay_factor(delay_factor=0)
28 time.sleep(0.3 * delay_factor)
29 self.clear_buffer()
30 self._test_channel_read(pattern=r"[>#]")
31 self.set_base_prompt()
32 self.enable()
33 self.disable_paging()
34 # Clear the read buffer
35 time.sleep(0.3 * self.global_delay_factor)
36 self.clear_buffer()
37
38 def enable(
39 self,
40 cmd: str = "",
41 pattern: str = "ssword",
42 enable_pattern: Optional[str] = None,
43 check_state: bool = True,
44 re_flags: int = re.IGNORECASE,
45 ) -> str:
46 """
47 TPLink JetStream requires you to first execute "enable" and then execute "enable-admin".
48 This is necessary as "configure" is generally only available at "enable-admin" level
49
50 If the user does not have the Admin role, he will need to execute enable-admin to really
51 enable all functions.
52 """
53
54 msg = """
55 Failed to enter enable mode. Please ensure you pass
56 the 'secret' argument to ConnectHandler.
57 """
58
59 # If end-user passes in "cmd" execute that using normal process.
60 if cmd:
61 return super().enable(
62 cmd=cmd,
63 pattern=pattern,
64 enable_pattern=enable_pattern,
65 check_state=check_state,
66 re_flags=re_flags,
67 )
68
69 output = ""
70 if check_state and self.check_enable_mode():
71 return output
72
73 for cmd in ("enable", "enable-admin"):
74 self.write_channel(self.normalize_cmd(cmd))
75 try:
76 new_data = self.read_until_prompt_or_pattern(
77 pattern=pattern, re_flags=re_flags, read_entire_line=True
78 )
79 output += new_data
80 if re.search(pattern, new_data):
81 self.write_channel(self.normalize_cmd(self.secret))
82 output += self.read_until_prompt(read_entire_line=True)
83 except ReadTimeout:
84 raise ValueError(msg)
85
86 if not self.check_enable_mode():
87 raise ValueError(msg)
88 return output
89
90 def config_mode(
91 self, config_command: str = "configure", pattern: str = "", re_flags: int = 0
92 ) -> str:
93 return super().config_mode(
94 config_command=config_command, pattern=pattern, re_flags=re_flags
95 )
96
97 def exit_config_mode(self, exit_config: str = "exit", pattern: str = r"#") -> str:
98 """
99 Exit config mode.
100
101 Like the Mellanox equipment, the TP-Link Jetstream does not
102 support a single command to completely exit the configuration mode.
103
104 Consequently, need to keep checking and sending "exit".
105 """
106 output = ""
107 check_count = 12
108 while check_count >= 0:
109 if self.check_config_mode():
110 self.write_channel(self.normalize_cmd(exit_config))
111 output += self.read_until_pattern(pattern=pattern)
112 else:
113 break
114 check_count -= 1
115
116 if self.check_config_mode():
117 raise ValueError("Failed to exit configuration mode")
118 log.debug(f"exit_config_mode: {output}")
119
120 return output
121
122 def check_config_mode(
123 self,
124 check_string: str = "(config",
125 pattern: str = r"#",
126 force_regex: bool = False,
127 ) -> bool:
128 """Check whether device is in configuration mode. Return a boolean."""
129 return super().check_config_mode(check_string=check_string, pattern=pattern)
130
131 def set_base_prompt(
132 self,
133 pri_prompt_terminator: str = ">",
134 alt_prompt_terminator: str = "#",
135 delay_factor: float = 1.0,
136 pattern: Optional[str] = None,
137 ) -> str:
138 """
139 Sets self.base_prompt
140
141 Used as delimiter for stripping of trailing prompt in output.
142
143 Should be set to something that is general and applies in multiple
144 contexts. For TP-Link this will be the router prompt with > or #
145 stripped off.
146
147 This will be set on logging in, but not when entering system-view
148 """
149 return super().set_base_prompt(
150 pri_prompt_terminator=pri_prompt_terminator,
151 alt_prompt_terminator=alt_prompt_terminator,
152 delay_factor=delay_factor,
153 pattern=pattern,
154 )
155
156
157 class TPLinkJetStreamSSH(TPLinkJetStreamBase):
158 def __init__(self, **kwargs: Any) -> None:
159 setattr(dsa, "_check_dsa_parameters", self._override_check_dsa_parameters)
160 return super().__init__(**kwargs)
161
162 def _override_check_dsa_parameters(self, parameters: DSAParameterNumbers) -> None:
163 """
164 Override check_dsa_parameters from cryptography's dsa.py
165
166 Without this the error below occurs:
167
168 ValueError: p must be exactly 1024, 2048, or 3072 bits long
169
170 Allows for shorter or longer parameters.p to be returned
171 from the server's host key. This is a HORRIBLE hack and a
172 security risk, please remove if possible!
173
174 By now, with firmware:
175
176 2.0.5 Build 20200109 Rel.36203(s)
177
178 It's still not possible to remove this hack.
179 """
180 if parameters.q.bit_length() not in [160, 256]:
181 raise ValueError("q must be exactly 160 or 256 bits long")
182
183 if not (1 < parameters.g < parameters.p):
184 raise ValueError("g, p don't satisfy 1 < g < p.")
185
186
187 class TPLinkJetStreamTelnet(TPLinkJetStreamBase):
188 def telnet_login(
189 self,
190 pri_prompt_terminator: str = "#",
191 alt_prompt_terminator: str = ">",
192 username_pattern: str = r"User:",
193 pwd_pattern: str = r"Password:",
194 delay_factor: float = 1.0,
195 max_loops: int = 60,
196 ) -> str:
197 """Telnet login: can be username/password or just password."""
198 return super().telnet_login(
199 pri_prompt_terminator=pri_prompt_terminator,
200 alt_prompt_terminator=alt_prompt_terminator,
201 username_pattern=username_pattern,
202 pwd_pattern=pwd_pattern,
203 delay_factor=delay_factor,
204 max_loops=max_loops,
205 )
206
[end of netmiko/tplink/tplink_jetstream.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/netmiko/tplink/tplink_jetstream.py b/netmiko/tplink/tplink_jetstream.py
--- a/netmiko/tplink/tplink_jetstream.py
+++ b/netmiko/tplink/tplink_jetstream.py
@@ -21,15 +21,12 @@
return super().__init__(**kwargs)
def session_preparation(self) -> None:
- """
- Prepare the session after the connection has been established.
- """
delay_factor = self.select_delay_factor(delay_factor=0)
time.sleep(0.3 * delay_factor)
- self.clear_buffer()
self._test_channel_read(pattern=r"[>#]")
self.set_base_prompt()
self.enable()
+ self.set_base_prompt()
self.disable_paging()
# Clear the read buffer
time.sleep(0.3 * self.global_delay_factor)
| {"golden_diff": "diff --git a/netmiko/tplink/tplink_jetstream.py b/netmiko/tplink/tplink_jetstream.py\n--- a/netmiko/tplink/tplink_jetstream.py\n+++ b/netmiko/tplink/tplink_jetstream.py\n@@ -21,15 +21,12 @@\n return super().__init__(**kwargs)\n \n def session_preparation(self) -> None:\n- \"\"\"\n- Prepare the session after the connection has been established.\n- \"\"\"\n delay_factor = self.select_delay_factor(delay_factor=0)\n time.sleep(0.3 * delay_factor)\n- self.clear_buffer()\n self._test_channel_read(pattern=r\"[>#]\")\n self.set_base_prompt()\n self.enable()\n+ self.set_base_prompt()\n self.disable_paging()\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n", "issue": "tplink \"Pattern not detected: '[>#]' in output.\" in version 4.0.0 and above\nHello!\r\n\r\nAn error appears when connecting to tplink in version 4.0.0 and above, but in version 3.4.0 there is no error and the result is displayed\r\n\r\n**Code:**\r\n```\r\ndevice = {\r\n \"device_type\": \"tplink_jetstream\",\r\n \"host\": host,\r\n \"username\": param[\"ssh_user\"],\r\n \"password\": param[\"ssh_pass\"],\r\n}\r\nwith ConnectHandler(**device) as ssh:\r\n ssh.enable()\r\n output_before = ssh.send_command(\"sh system-info\")\r\n print(output_before)\r\n```\r\n\r\n**Error:**\r\n```\r\nPattern not detected: '[>#]' in output.\r\n\r\nThings you might try to fix this:\r\n1. Adjust the regex pattern to better identify the terminating string. Note, in\r\nmany situations the pattern is automatically based on the network device's prompt.\r\n2. Increase the read_timeout to a larger value.\r\n\r\nYou can also look at the Netmiko session_log or debug log for more information.\r\n```\r\n**Debug:**\r\n```\r\nDEBUG:paramiko.transport:starting thread (client mode): 0x30757d50\r\nDEBUG:paramiko.transport:Local version/idstring: SSH-2.0-paramiko_2.12.0\r\nDEBUG:paramiko.transport:Remote version/idstring: SSH-1.99-IPSSH-6.6.0\r\nINFO:paramiko.transport:Connected (version 1.99, client IPSSH-6.6.0)\r\nDEBUG:paramiko.transport:=== Key exchange possibilities ===\r\nDEBUG:paramiko.transport:kex algos: diffie-hellman-group1-sha1\r\nDEBUG:paramiko.transport:server key: ssh-dss\r\nDEBUG:paramiko.transport:client encrypt: aes128-cbc, aes192-cbc, aes256-cbc, blowfish-cbc, cast128-cbc, 3des-cbc\r\nDEBUG:paramiko.transport:server encrypt: aes128-cbc, aes192-cbc, aes256-cbc, blowfish-cbc, cast128-cbc, 3des-cbc\r\nDEBUG:paramiko.transport:client mac: hmac-sha1, hmac-sha1-96, hmac-md5, hmac-md5-96\r\nDEBUG:paramiko.transport:server mac: hmac-sha1, hmac-sha1-96, hmac-md5, hmac-md5-96\r\nDEBUG:paramiko.transport:client compress: none\r\nDEBUG:paramiko.transport:server compress: none\r\nDEBUG:paramiko.transport:client lang: <none>\r\nDEBUG:paramiko.transport:server lang: <none>\r\nDEBUG:paramiko.transport:kex follows: False\r\nDEBUG:paramiko.transport:=== Key exchange agreements ===\r\nDEBUG:paramiko.transport:Kex: diffie-hellman-group1-sha1\r\nDEBUG:paramiko.transport:HostKey: ssh-dss\r\nDEBUG:paramiko.transport:Cipher: aes128-cbc\r\nDEBUG:paramiko.transport:MAC: hmac-sha1\r\nDEBUG:paramiko.transport:Compression: none\r\nDEBUG:paramiko.transport:=== End of kex handshake ===\r\nDEBUG:paramiko.transport:kex engine KexGroup1 specified hash_algo <built-in function openssl_sha1>\r\nDEBUG:paramiko.transport:Switch to new keys ...\r\nDEBUG:paramiko.transport:Adding ssh-dss host key for 10.10.10.150: b'6e3ca009ab027terd8aa7e40d0516ef8'\r\nDEBUG:paramiko.transport:userauth is OK\r\nINFO:paramiko.transport:Authentication (password) 
successful!\r\nDEBUG:paramiko.transport:[chan 0] Max packet in: 32768 bytes\r\nDEBUG:paramiko.transport:[chan 0] Max packet out: 32768 bytes\r\nDEBUG:paramiko.transport:Secsh channel 0 opened.\r\nDEBUG:paramiko.transport:[chan 0] Sesch channel 0 request ok\r\nDEBUG:paramiko.transport:[chan 0] Sesch channel 0 request ok\r\nDEBUG:netmiko:write_channel: b'\\r\\n'\r\nDEBUG:netmiko:read_channel: \r\n\r\ntp3428-test>\r\n\r\n\r\ntp3428-test>\r\nDEBUG:netmiko:Clear buffer detects data in the channel\r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\n...\r\nEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:write_channel: b'\\r\\n'\r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\n\r\n\r\ntp3428-test>\r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\n....\r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:read_channel: \r\nDEBUG:netmiko:write_channel: b'exit\\r\\n'\r\n```\r\nIn version 3.4.0, the same code works without errors:\r\n```\r\nThreadPoolExecutor-0_0 root INFO: ===> 18:53:05.075551 Connection: 10.10.10.150\r\n System Description - JetStream 24-Port Gigabit L2+ Managed Switch with 4 10GE SFP+ Slots\r\n System Name - tp3428-test\r\n System Location - Hong Kong\r\n Contact Information - www.tp-link.com\r\n Hardware Version - TL-SG3428X 1.0\r\n Bootloader Version - TP-LINK BOOTUTIL(v1.0.0)\r\n System Time - 2023-02-22 18:53:16\r\n Running Time - 0 day - 5 hour - 20 min - 12 sec\r\n\r\n```\r\n\n", "before_files": [{"content": "import re\nimport time\nfrom typing import Any, Optional\n\nfrom cryptography.hazmat.primitives.asymmetric import dsa\nfrom cryptography.hazmat.primitives.asymmetric.dsa import DSAParameterNumbers\n\nfrom netmiko import log\nfrom netmiko.cisco_base_connection import CiscoSSHConnection\nfrom netmiko.exceptions import ReadTimeout\n\n\nclass TPLinkJetStreamBase(CiscoSSHConnection):\n def __init__(self, **kwargs: Any) -> None:\n # TP-Link doesn't have a way to set terminal width which breaks cmd_verify\n if kwargs.get(\"global_cmd_verify\") is None:\n kwargs[\"global_cmd_verify\"] = False\n # TP-Link uses \"\\r\\n\" as default_enter for SSH and Telnet\n if kwargs.get(\"default_enter\") is None:\n kwargs[\"default_enter\"] = \"\\r\\n\"\n return super().__init__(**kwargs)\n\n def session_preparation(self) -> None:\n \"\"\"\n Prepare the session after the connection has been established.\n \"\"\"\n delay_factor = self.select_delay_factor(delay_factor=0)\n time.sleep(0.3 * delay_factor)\n self.clear_buffer()\n self._test_channel_read(pattern=r\"[>#]\")\n self.set_base_prompt()\n self.enable()\n self.disable_paging()\n # Clear the read buffer\n time.sleep(0.3 * self.global_delay_factor)\n self.clear_buffer()\n\n def enable(\n self,\n cmd: str = \"\",\n pattern: str = \"ssword\",\n enable_pattern: Optional[str] = None,\n check_state: bool = True,\n re_flags: int = re.IGNORECASE,\n ) -> str:\n \"\"\"\n TPLink JetStream requires you to first execute \"enable\" and then execute \"enable-admin\".\n This is necessary as \"configure\" is generally only available at \"enable-admin\" level\n\n If the user does not have the Admin role, he will need to execute enable-admin to really\n enable all functions.\n \"\"\"\n\n msg = \"\"\"\nFailed to enter enable mode. 
Please ensure you pass\nthe 'secret' argument to ConnectHandler.\n\"\"\"\n\n # If end-user passes in \"cmd\" execute that using normal process.\n if cmd:\n return super().enable(\n cmd=cmd,\n pattern=pattern,\n enable_pattern=enable_pattern,\n check_state=check_state,\n re_flags=re_flags,\n )\n\n output = \"\"\n if check_state and self.check_enable_mode():\n return output\n\n for cmd in (\"enable\", \"enable-admin\"):\n self.write_channel(self.normalize_cmd(cmd))\n try:\n new_data = self.read_until_prompt_or_pattern(\n pattern=pattern, re_flags=re_flags, read_entire_line=True\n )\n output += new_data\n if re.search(pattern, new_data):\n self.write_channel(self.normalize_cmd(self.secret))\n output += self.read_until_prompt(read_entire_line=True)\n except ReadTimeout:\n raise ValueError(msg)\n\n if not self.check_enable_mode():\n raise ValueError(msg)\n return output\n\n def config_mode(\n self, config_command: str = \"configure\", pattern: str = \"\", re_flags: int = 0\n ) -> str:\n return super().config_mode(\n config_command=config_command, pattern=pattern, re_flags=re_flags\n )\n\n def exit_config_mode(self, exit_config: str = \"exit\", pattern: str = r\"#\") -> str:\n \"\"\"\n Exit config mode.\n\n Like the Mellanox equipment, the TP-Link Jetstream does not\n support a single command to completely exit the configuration mode.\n\n Consequently, need to keep checking and sending \"exit\".\n \"\"\"\n output = \"\"\n check_count = 12\n while check_count >= 0:\n if self.check_config_mode():\n self.write_channel(self.normalize_cmd(exit_config))\n output += self.read_until_pattern(pattern=pattern)\n else:\n break\n check_count -= 1\n\n if self.check_config_mode():\n raise ValueError(\"Failed to exit configuration mode\")\n log.debug(f\"exit_config_mode: {output}\")\n\n return output\n\n def check_config_mode(\n self,\n check_string: str = \"(config\",\n pattern: str = r\"#\",\n force_regex: bool = False,\n ) -> bool:\n \"\"\"Check whether device is in configuration mode. Return a boolean.\"\"\"\n return super().check_config_mode(check_string=check_string, pattern=pattern)\n\n def set_base_prompt(\n self,\n pri_prompt_terminator: str = \">\",\n alt_prompt_terminator: str = \"#\",\n delay_factor: float = 1.0,\n pattern: Optional[str] = None,\n ) -> str:\n \"\"\"\n Sets self.base_prompt\n\n Used as delimiter for stripping of trailing prompt in output.\n\n Should be set to something that is general and applies in multiple\n contexts. For TP-Link this will be the router prompt with > or #\n stripped off.\n\n This will be set on logging in, but not when entering system-view\n \"\"\"\n return super().set_base_prompt(\n pri_prompt_terminator=pri_prompt_terminator,\n alt_prompt_terminator=alt_prompt_terminator,\n delay_factor=delay_factor,\n pattern=pattern,\n )\n\n\nclass TPLinkJetStreamSSH(TPLinkJetStreamBase):\n def __init__(self, **kwargs: Any) -> None:\n setattr(dsa, \"_check_dsa_parameters\", self._override_check_dsa_parameters)\n return super().__init__(**kwargs)\n\n def _override_check_dsa_parameters(self, parameters: DSAParameterNumbers) -> None:\n \"\"\"\n Override check_dsa_parameters from cryptography's dsa.py\n\n Without this the error below occurs:\n\n ValueError: p must be exactly 1024, 2048, or 3072 bits long\n\n Allows for shorter or longer parameters.p to be returned\n from the server's host key. 
This is a HORRIBLE hack and a\n security risk, please remove if possible!\n\n By now, with firmware:\n\n 2.0.5 Build 20200109 Rel.36203(s)\n\n It's still not possible to remove this hack.\n \"\"\"\n if parameters.q.bit_length() not in [160, 256]:\n raise ValueError(\"q must be exactly 160 or 256 bits long\")\n\n if not (1 < parameters.g < parameters.p):\n raise ValueError(\"g, p don't satisfy 1 < g < p.\")\n\n\nclass TPLinkJetStreamTelnet(TPLinkJetStreamBase):\n def telnet_login(\n self,\n pri_prompt_terminator: str = \"#\",\n alt_prompt_terminator: str = \">\",\n username_pattern: str = r\"User:\",\n pwd_pattern: str = r\"Password:\",\n delay_factor: float = 1.0,\n max_loops: int = 60,\n ) -> str:\n \"\"\"Telnet login: can be username/password or just password.\"\"\"\n return super().telnet_login(\n pri_prompt_terminator=pri_prompt_terminator,\n alt_prompt_terminator=alt_prompt_terminator,\n username_pattern=username_pattern,\n pwd_pattern=pwd_pattern,\n delay_factor=delay_factor,\n max_loops=max_loops,\n )\n", "path": "netmiko/tplink/tplink_jetstream.py"}]} | 4,006 | 196 |
gh_patches_debug_27473 | rasdani/github-patches | git_diff | huggingface__text-generation-inference-1947 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TGI 2.0.2 CodeLlama error `piece id is out of range.`
### System Info
ghcr.io/huggingface/text-generation-inference:2.0.2
### Information
- [X] Docker
- [ ] The CLI directly
### Tasks
- [X] An officially supported command
- [ ] My own modifications
### Reproduction
```bash
model=philschmid/code-llama-7b-text-to-sql
num_shard=1
max_input_length=2048
max_total_tokens=4096
max_prefill_token=4096 # 4096
docker run --gpus all -ti -p 8080:80 \
-e MODEL_ID=$model \
-e NUM_SHARD=$num_shard \
-e MAX_INPUT_LENGTH=$max_input_length \
-e MAX_TOTAL_TOKENS=$max_total_tokens \
-e MAX_BATCH_PREFILL_TOKENS=$max_prefill_token \
-e HF_TOKEN=$(cat ~/.cache/huggingface/token) \
ghcr.io/huggingface/text-generation-inference:2.0.2
```
### Expected behavior
Running Endpoints as with version 2.0.0
### Error
```bash
2024-05-14T12:30:52.830987Z ERROR text_generation_launcher: Method Warmup encountered an error.
Traceback (most recent call last):
File "/opt/conda/bin/text-generation-server", line 8, in <module>
sys.exit(app())
File "/opt/conda/lib/python3.10/site-packages/typer/main.py", line 311, in __call__
return get_command(self)(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 1157, in __call__
return self.main(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/typer/core.py", line 778, in main
return _main(
File "/opt/conda/lib/python3.10/site-packages/typer/core.py", line 216, in _main
rv = self.invoke(ctx)
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 1688, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 1434, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/opt/conda/lib/python3.10/site-packages/click/core.py", line 783, in invoke
return __callback(*args, **kwargs)
File "/opt/conda/lib/python3.10/site-packages/typer/main.py", line 683, in wrapper
return callback(**use_params) # type: ignore
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/cli.py", line 90, in serve
server.serve(
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py", line 253, in serve
asyncio.run(
File "/opt/conda/lib/python3.10/asyncio/runners.py", line 44, in run
return loop.run_until_complete(main)
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 636, in run_until_complete
self.run_forever()
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 603, in run_forever
self._run_once()
File "/opt/conda/lib/python3.10/asyncio/base_events.py", line 1909, in _run_once
handle._run()
File "/opt/conda/lib/python3.10/asyncio/events.py", line 80, in _run
self._context.run(self._callback, *self._args)
File "/opt/conda/lib/python3.10/site-packages/grpc_interceptor/server.py", line 165, in invoke_intercept_method
return await self.intercept(
> File "/opt/conda/lib/python3.10/site-packages/text_generation_server/interceptor.py", line 21, in intercept
return await response
File "/opt/conda/lib/python3.10/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 82, in _unary_interceptor
raise error
File "/opt/conda/lib/python3.10/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py", line 73, in _unary_interceptor
return await behavior(request_or_iterator, context)
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py", line 114, in Warmup
max_supported_total_tokens = self.model.warmup(batch)
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_causal_lm.py", line 776, in warmup
_, batch, _ = self.generate_token(batch)
File "/opt/conda/lib/python3.10/contextlib.py", line 79, in inner
return func(*args, **kwds)
File "/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_causal_lm.py", line 1206, in generate_token
toptoken_texts = self.tokenizer.batch_decode(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 3771, in batch_decode
return [
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 3772, in <listcomp>
self.decode(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py", line 3811, in decode
return self._decode(
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils.py", line 1001, in _decode
filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)
File "/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils.py", line 973, in convert_ids_to_tokens
return self._convert_id_to_token(ids)
File "/opt/conda/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py", line 277, in _convert_id_to_token
token = self.sp_model.IdToPiece(index)
File "/opt/conda/lib/python3.10/site-packages/sentencepiece/__init__.py", line 1045, in _batched_func
return _func(self, arg)
File "/opt/conda/lib/python3.10/site-packages/sentencepiece/__init__.py", line 1038, in _func
raise IndexError('piece id is out of range.')
IndexError: piece id is out of range.
2024-05-14T12:30:52.841680Z ERROR warmup{max_input_length=2048 max_prefill_tokens=4096 max_total_tokens=4096 max_batch_size=None}:warmup: text_generation_client: router/client/src/lib.rs:33: Server error: piece id is out of range.
Error: Warmup(Generation("piece id is out of range."))
2024-05-14T12:30:52.870991Z ERROR text_generation_launcher: Webserver Crashed
```
</issue>
<code>
[start of server/text_generation_server/models/flash_llama.py]
1 import torch
2 import torch.distributed
3
4 from opentelemetry import trace
5 from transformers import AutoConfig, AutoTokenizer, GenerationConfig
6 from transformers.models.llama import LlamaTokenizer
7 from typing import Optional
8
9 from text_generation_server.models import FlashCausalLM
10 from text_generation_server.models.custom_modeling.flash_llama_modeling import (
11 FlashLlamaForCausalLM,
12 )
13 from text_generation_server.utils import (
14 initialize_torch_distributed,
15 weight_files,
16 Weights,
17 )
18
19 tracer = trace.get_tracer(__name__)
20
21 from text_generation_server.utils.import_utils import SYSTEM
22
23
24 class FlashLlama(FlashCausalLM):
25 def __init__(
26 self,
27 model_id: str,
28 revision: Optional[str] = None,
29 quantize: Optional[str] = None,
30 speculator: Optional[str] = None,
31 dtype: Optional[torch.dtype] = None,
32 trust_remote_code: bool = False,
33 ):
34 self.process_group, rank, world_size = initialize_torch_distributed()
35 if torch.cuda.is_available():
36 device = torch.device(f"cuda:{rank}")
37 dtype = torch.float16 if dtype is None else dtype
38 elif SYSTEM == "xpu":
39 device = torch.device(f"xpu:{rank}")
40 dtype = torch.float16 if dtype is None else dtype
41 else:
42 raise NotImplementedError("FlashLlama is only available on GPU")
43
44 try:
45 tokenizer = LlamaTokenizer.from_pretrained(
46 model_id,
47 revision=revision,
48 padding_side="left",
49 truncation_side="left",
50 trust_remote_code=trust_remote_code,
51 )
52 except Exception:
53 tokenizer = AutoTokenizer.from_pretrained(
54 model_id,
55 revision=revision,
56 padding_side="left",
57 truncation_side="left",
58 trust_remote_code=trust_remote_code,
59 )
60 try:
61 generation_config = GenerationConfig.from_pretrained(
62 model_id, revision=revision, trust_remote_code=trust_remote_code
63 )
64 if isinstance(generation_config.eos_token_id, (list, set)):
65 # TODO Huge hack
66 tokenizer._eos_token_ids = set(generation_config.eos_token_id)
67 except Exception:
68 pass
69
70 config = AutoConfig.from_pretrained(
71 model_id, revision=revision, trust_remote_code=trust_remote_code
72 )
73 config.quantize = quantize
74 config.speculator = speculator
75
76 torch.distributed.barrier(group=self.process_group)
77
78 filenames = weight_files(model_id, revision=revision, extension=".safetensors")
79 weights = Weights(filenames, device, dtype, process_group=self.process_group)
80 if config.quantize in ["gptq", "awq"]:
81 weights._set_gptq_params(model_id, revision)
82
83 prefix = ""
84 model = FlashLlamaForCausalLM(prefix, config, weights)
85 torch.distributed.barrier(group=self.process_group)
86 super(FlashLlama, self).__init__(
87 model=model,
88 tokenizer=tokenizer,
89 num_layers=len(model.model.layers),
90 num_kv_heads=model.model.num_key_value_heads,
91 head_size=model.model.head_size,
92 dtype=dtype,
93 device=device,
94 rank=rank,
95 world_size=world_size,
96 )
97
[end of server/text_generation_server/models/flash_llama.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py
--- a/server/text_generation_server/models/flash_llama.py
+++ b/server/text_generation_server/models/flash_llama.py
@@ -3,7 +3,6 @@
from opentelemetry import trace
from transformers import AutoConfig, AutoTokenizer, GenerationConfig
-from transformers.models.llama import LlamaTokenizer
from typing import Optional
from text_generation_server.models import FlashCausalLM
@@ -41,22 +40,13 @@
else:
raise NotImplementedError("FlashLlama is only available on GPU")
- try:
- tokenizer = LlamaTokenizer.from_pretrained(
- model_id,
- revision=revision,
- padding_side="left",
- truncation_side="left",
- trust_remote_code=trust_remote_code,
- )
- except Exception:
- tokenizer = AutoTokenizer.from_pretrained(
- model_id,
- revision=revision,
- padding_side="left",
- truncation_side="left",
- trust_remote_code=trust_remote_code,
- )
+ tokenizer = AutoTokenizer.from_pretrained(
+ model_id,
+ revision=revision,
+ padding_side="left",
+ truncation_side="left",
+ trust_remote_code=trust_remote_code,
+ )
try:
generation_config = GenerationConfig.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
| {"golden_diff": "diff --git a/server/text_generation_server/models/flash_llama.py b/server/text_generation_server/models/flash_llama.py\n--- a/server/text_generation_server/models/flash_llama.py\n+++ b/server/text_generation_server/models/flash_llama.py\n@@ -3,7 +3,6 @@\n \n from opentelemetry import trace\n from transformers import AutoConfig, AutoTokenizer, GenerationConfig\n-from transformers.models.llama import LlamaTokenizer\n from typing import Optional\n \n from text_generation_server.models import FlashCausalLM\n@@ -41,22 +40,13 @@\n else:\n raise NotImplementedError(\"FlashLlama is only available on GPU\")\n \n- try:\n- tokenizer = LlamaTokenizer.from_pretrained(\n- model_id,\n- revision=revision,\n- padding_side=\"left\",\n- truncation_side=\"left\",\n- trust_remote_code=trust_remote_code,\n- )\n- except Exception:\n- tokenizer = AutoTokenizer.from_pretrained(\n- model_id,\n- revision=revision,\n- padding_side=\"left\",\n- truncation_side=\"left\",\n- trust_remote_code=trust_remote_code,\n- )\n+ tokenizer = AutoTokenizer.from_pretrained(\n+ model_id,\n+ revision=revision,\n+ padding_side=\"left\",\n+ truncation_side=\"left\",\n+ trust_remote_code=trust_remote_code,\n+ )\n try:\n generation_config = GenerationConfig.from_pretrained(\n model_id, revision=revision, trust_remote_code=trust_remote_code\n", "issue": "TGI 2.0.2 CodeLlama error `piece id is out of range.`\n### System Info\r\n\r\nghcr.io/huggingface/text-generation-inference:2.0.2\r\n\r\n### Information\r\n\r\n- [X] Docker\r\n- [ ] The CLI directly\r\n\r\n### Tasks\r\n\r\n- [X] An officially supported command\r\n- [ ] My own modifications\r\n\r\n### Reproduction\r\n\r\n```bash\r\nmodel=philschmid/code-llama-7b-text-to-sql\r\nnum_shard=1\r\nmax_input_length=2048\r\nmax_total_tokens=4096\r\nmax_prefill_token=4096 # 4096\r\n\r\ndocker run --gpus all -ti -p 8080:80 \\\r\n -e MODEL_ID=$model \\\r\n -e NUM_SHARD=$num_shard \\\r\n -e MAX_INPUT_LENGTH=$max_input_length \\\r\n -e MAX_TOTAL_TOKENS=$max_total_tokens \\\r\n -e MAX_BATCH_PREFILL_TOKENS=$max_prefill_token \\\r\n -e HF_TOKEN=$(cat ~/.cache/huggingface/token) \\\r\n ghcr.io/huggingface/text-generation-inference:2.0.2\r\n```\r\n\r\n### Expected behavior\r\n\r\nRunning Endpoints as with version 2.0.0\r\n\r\n\r\n### Error\r\n\r\n```bash\r\n2024-05-14T12:30:52.830987Z ERROR text_generation_launcher: Method Warmup encountered an error.\r\nTraceback (most recent call last):\r\n File \"/opt/conda/bin/text-generation-server\", line 8, in <module>\r\n sys.exit(app())\r\n File \"/opt/conda/lib/python3.10/site-packages/typer/main.py\", line 311, in __call__\r\n return get_command(self)(*args, **kwargs)\r\n File \"/opt/conda/lib/python3.10/site-packages/click/core.py\", line 1157, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/opt/conda/lib/python3.10/site-packages/typer/core.py\", line 778, in main\r\n return _main(\r\n File \"/opt/conda/lib/python3.10/site-packages/typer/core.py\", line 216, in _main\r\n rv = self.invoke(ctx)\r\n File \"/opt/conda/lib/python3.10/site-packages/click/core.py\", line 1688, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/opt/conda/lib/python3.10/site-packages/click/core.py\", line 1434, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/opt/conda/lib/python3.10/site-packages/click/core.py\", line 783, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"/opt/conda/lib/python3.10/site-packages/typer/main.py\", line 683, in wrapper\r\n return callback(**use_params) # type: 
ignore\r\n File \"/opt/conda/lib/python3.10/site-packages/text_generation_server/cli.py\", line 90, in serve\r\n server.serve(\r\n File \"/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py\", line 253, in serve\r\n asyncio.run(\r\n File \"/opt/conda/lib/python3.10/asyncio/runners.py\", line 44, in run\r\n return loop.run_until_complete(main)\r\n File \"/opt/conda/lib/python3.10/asyncio/base_events.py\", line 636, in run_until_complete\r\n self.run_forever()\r\n File \"/opt/conda/lib/python3.10/asyncio/base_events.py\", line 603, in run_forever\r\n self._run_once()\r\n File \"/opt/conda/lib/python3.10/asyncio/base_events.py\", line 1909, in _run_once\r\n handle._run()\r\n File \"/opt/conda/lib/python3.10/asyncio/events.py\", line 80, in _run\r\n self._context.run(self._callback, *self._args)\r\n File \"/opt/conda/lib/python3.10/site-packages/grpc_interceptor/server.py\", line 165, in invoke_intercept_method\r\n return await self.intercept(\r\n> File \"/opt/conda/lib/python3.10/site-packages/text_generation_server/interceptor.py\", line 21, in intercept\r\n return await response\r\n File \"/opt/conda/lib/python3.10/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py\", line 82, in _unary_interceptor\r\n raise error\r\n File \"/opt/conda/lib/python3.10/site-packages/opentelemetry/instrumentation/grpc/_aio_server.py\", line 73, in _unary_interceptor\r\n return await behavior(request_or_iterator, context)\r\n File \"/opt/conda/lib/python3.10/site-packages/text_generation_server/server.py\", line 114, in Warmup\r\n max_supported_total_tokens = self.model.warmup(batch)\r\n File \"/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_causal_lm.py\", line 776, in warmup\r\n _, batch, _ = self.generate_token(batch)\r\n File \"/opt/conda/lib/python3.10/contextlib.py\", line 79, in inner\r\n return func(*args, **kwds)\r\n File \"/opt/conda/lib/python3.10/site-packages/text_generation_server/models/flash_causal_lm.py\", line 1206, in generate_token\r\n toptoken_texts = self.tokenizer.batch_decode(\r\n File \"/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py\", line 3771, in batch_decode\r\n return [\r\n File \"/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py\", line 3772, in <listcomp>\r\n self.decode(\r\n File \"/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils_base.py\", line 3811, in decode\r\n return self._decode(\r\n File \"/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils.py\", line 1001, in _decode\r\n filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)\r\n File \"/opt/conda/lib/python3.10/site-packages/transformers/tokenization_utils.py\", line 973, in convert_ids_to_tokens\r\n return self._convert_id_to_token(ids)\r\n File \"/opt/conda/lib/python3.10/site-packages/transformers/models/llama/tokenization_llama.py\", line 277, in _convert_id_to_token\r\n token = self.sp_model.IdToPiece(index)\r\n File \"/opt/conda/lib/python3.10/site-packages/sentencepiece/__init__.py\", line 1045, in _batched_func\r\n return _func(self, arg)\r\n File \"/opt/conda/lib/python3.10/site-packages/sentencepiece/__init__.py\", line 1038, in _func\r\n raise IndexError('piece id is out of range.')\r\nIndexError: piece id is out of range.\r\n\r\n2024-05-14T12:30:52.841680Z ERROR warmup{max_input_length=2048 max_prefill_tokens=4096 max_total_tokens=4096 max_batch_size=None}:warmup: text_generation_client: 
router/client/src/lib.rs:33: Server error: piece id is out of range.\r\nError: Warmup(Generation(\"piece id is out of range.\"))\r\n2024-05-14T12:30:52.870991Z ERROR text_generation_launcher: Webserver Crashed\r\n```\n", "before_files": [{"content": "import torch\nimport torch.distributed\n\nfrom opentelemetry import trace\nfrom transformers import AutoConfig, AutoTokenizer, GenerationConfig\nfrom transformers.models.llama import LlamaTokenizer\nfrom typing import Optional\n\nfrom text_generation_server.models import FlashCausalLM\nfrom text_generation_server.models.custom_modeling.flash_llama_modeling import (\n FlashLlamaForCausalLM,\n)\nfrom text_generation_server.utils import (\n initialize_torch_distributed,\n weight_files,\n Weights,\n)\n\ntracer = trace.get_tracer(__name__)\n\nfrom text_generation_server.utils.import_utils import SYSTEM\n\n\nclass FlashLlama(FlashCausalLM):\n def __init__(\n self,\n model_id: str,\n revision: Optional[str] = None,\n quantize: Optional[str] = None,\n speculator: Optional[str] = None,\n dtype: Optional[torch.dtype] = None,\n trust_remote_code: bool = False,\n ):\n self.process_group, rank, world_size = initialize_torch_distributed()\n if torch.cuda.is_available():\n device = torch.device(f\"cuda:{rank}\")\n dtype = torch.float16 if dtype is None else dtype\n elif SYSTEM == \"xpu\":\n device = torch.device(f\"xpu:{rank}\")\n dtype = torch.float16 if dtype is None else dtype\n else:\n raise NotImplementedError(\"FlashLlama is only available on GPU\")\n\n try:\n tokenizer = LlamaTokenizer.from_pretrained(\n model_id,\n revision=revision,\n padding_side=\"left\",\n truncation_side=\"left\",\n trust_remote_code=trust_remote_code,\n )\n except Exception:\n tokenizer = AutoTokenizer.from_pretrained(\n model_id,\n revision=revision,\n padding_side=\"left\",\n truncation_side=\"left\",\n trust_remote_code=trust_remote_code,\n )\n try:\n generation_config = GenerationConfig.from_pretrained(\n model_id, revision=revision, trust_remote_code=trust_remote_code\n )\n if isinstance(generation_config.eos_token_id, (list, set)):\n # TODO Huge hack\n tokenizer._eos_token_ids = set(generation_config.eos_token_id)\n except Exception:\n pass\n\n config = AutoConfig.from_pretrained(\n model_id, revision=revision, trust_remote_code=trust_remote_code\n )\n config.quantize = quantize\n config.speculator = speculator\n\n torch.distributed.barrier(group=self.process_group)\n\n filenames = weight_files(model_id, revision=revision, extension=\".safetensors\")\n weights = Weights(filenames, device, dtype, process_group=self.process_group)\n if config.quantize in [\"gptq\", \"awq\"]:\n weights._set_gptq_params(model_id, revision)\n\n prefix = \"\"\n model = FlashLlamaForCausalLM(prefix, config, weights)\n torch.distributed.barrier(group=self.process_group)\n super(FlashLlama, self).__init__(\n model=model,\n tokenizer=tokenizer,\n num_layers=len(model.model.layers),\n num_kv_heads=model.model.num_key_value_heads,\n head_size=model.model.head_size,\n dtype=dtype,\n device=device,\n rank=rank,\n world_size=world_size,\n )\n", "path": "server/text_generation_server/models/flash_llama.py"}]} | 3,170 | 325 |
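The patch for this record drops the `LlamaTokenizer`-first construction and always goes through `AutoTokenizer`, which prefers the fast tokenizer shipped with the checkpoint; the traceback above fails inside the slow SentencePiece path (`sp_model.IdToPiece`). A standalone sketch of the simplified loading call, reusing the arguments from `flash_llama.py` (the model id comes from the reproduction steps):

```python
# Hedged sketch of the post-patch tokenizer construction; assumes the
# `transformers` package is installed and the model is reachable on the Hub.
from typing import Optional

from transformers import AutoTokenizer


def load_tokenizer(model_id: str, revision: Optional[str] = None,
                   trust_remote_code: bool = False):
    # AutoTokenizer resolves to the fast (tokenizers-based) implementation
    # when one is available, instead of forcing the slow SentencePiece
    # LlamaTokenizer that the pre-patch code tried first.
    return AutoTokenizer.from_pretrained(
        model_id,
        revision=revision,
        padding_side="left",
        truncation_side="left",
        trust_remote_code=trust_remote_code,
    )


tokenizer = load_tokenizer("philschmid/code-llama-7b-text-to-sql")
print(type(tokenizer).__name__)
```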
gh_patches_debug_28660 | rasdani/github-patches | git_diff | mozilla__pontoon-2675 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
No warnings when trying to submit empty translations
I've noticed an increase in the number of empty strings in Firefox, where I have [special checks](https://test.flod.org/checks/).
Apparently, we don't warn anymore when someone tries to submit an empty translation.
</issue>
<code>
[start of pontoon/checks/libraries/pontoon_db.py]
1 import html
2 import re
3
4 import bleach
5
6 from collections import defaultdict
7 from fluent.syntax import FluentParser, ast
8
9 from pontoon.sync.formats.ftl import localizable_entries
10
11
12 MAX_LENGTH_RE = re.compile(r"MAX_LENGTH:( *)(\d+)", re.MULTILINE)
13 parser = FluentParser()
14
15
16 def get_max_length(comment):
17 """
18 Return max length value for an entity with MAX_LENTH.
19 """
20 max_length = re.findall(MAX_LENGTH_RE, comment or "")
21
22 if max_length:
23 return int(max_length[0][1])
24
25 return None
26
27
28 def run_checks(entity, original, string):
29 """
30 Group all checks related to the base UI that get stored in the DB
31 :arg pontoon.base.models.Entity entity: Source entity
32 :arg basestring original: an original string
33 :arg basestring string: a translation
34 """
35 checks = defaultdict(list)
36 resource_ext = entity.resource.format
37
38 if resource_ext == "lang":
39 # Newlines are not allowed in .lang files (bug 1190754)
40 if "\n" in string:
41 checks["pErrors"].append("Newline characters are not allowed")
42
43 # Prevent translations exceeding the given length limit
44 max_length = get_max_length(entity.comment)
45
46 if max_length:
47 string_length = len(
48 html.unescape(bleach.clean(string, strip=True, tags=()))
49 )
50
51 if string_length > max_length:
52 checks["pErrors"].append("Translation too long")
53
54 # Bug 1599056: Original and translation must either both end in a newline,
55 # or none of them should.
56 if resource_ext == "po":
57 if original.endswith("\n") != string.endswith("\n"):
58 checks["pErrors"].append("Ending newline mismatch")
59
60 # Prevent empty translation submissions if not supported
61 if string == "" and not entity.resource.allows_empty_translations:
62 checks["pErrors"].append("Empty translations are not allowed")
63
64 # FTL checks
65 if resource_ext == "ftl" and string != "":
66 translation_ast = parser.parse_entry(string)
67 entity_ast = parser.parse_entry(entity.string)
68
69 # Parse error
70 if isinstance(translation_ast, ast.Junk):
71 checks["pErrors"].append(translation_ast.annotations[0].message)
72
73 # Not a localizable entry
74 elif not isinstance(translation_ast, localizable_entries):
75 checks["pErrors"].append(
76 "Translation needs to be a valid localizable entry"
77 )
78
79 # Message ID mismatch
80 elif entity_ast.id.name != translation_ast.id.name:
81 checks["pErrors"].append("Translation key needs to match source string key")
82
83 return checks
84
[end of pontoon/checks/libraries/pontoon_db.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pontoon/checks/libraries/pontoon_db.py b/pontoon/checks/libraries/pontoon_db.py
--- a/pontoon/checks/libraries/pontoon_db.py
+++ b/pontoon/checks/libraries/pontoon_db.py
@@ -5,6 +5,7 @@
from collections import defaultdict
from fluent.syntax import FluentParser, ast
+from fluent.syntax.visitor import Visitor
from pontoon.sync.formats.ftl import localizable_entries
@@ -25,6 +26,24 @@
return None
+class IsEmptyVisitor(Visitor):
+ def __init__(self):
+ self.is_empty = True
+
+ def visit_Placeable(self, node):
+ if isinstance(node.expression, ast.Literal):
+ if node.expression.parse()["value"]:
+ self.is_empty = False
+ elif isinstance(node.expression, ast.SelectExpression):
+ self.generic_visit(node.expression)
+ else:
+ self.is_empty = False
+
+ def visit_TextElement(self, node):
+ if node.value:
+ self.is_empty = False
+
+
def run_checks(entity, original, string):
"""
Group all checks related to the base UI that get stored in the DB
@@ -80,4 +99,12 @@
elif entity_ast.id.name != translation_ast.id.name:
checks["pErrors"].append("Translation key needs to match source string key")
+ # Empty translation entry warning; set here rather than pontoon_non_db.py
+ # to avoid needing to parse the Fluent message twice.
+ else:
+ visitor = IsEmptyVisitor()
+ visitor.visit(translation_ast)
+ if visitor.is_empty:
+ checks["pndbWarnings"].append("Empty translation")
+
return checks
| {"golden_diff": "diff --git a/pontoon/checks/libraries/pontoon_db.py b/pontoon/checks/libraries/pontoon_db.py\n--- a/pontoon/checks/libraries/pontoon_db.py\n+++ b/pontoon/checks/libraries/pontoon_db.py\n@@ -5,6 +5,7 @@\n \n from collections import defaultdict\n from fluent.syntax import FluentParser, ast\n+from fluent.syntax.visitor import Visitor\n \n from pontoon.sync.formats.ftl import localizable_entries\n \n@@ -25,6 +26,24 @@\n return None\n \n \n+class IsEmptyVisitor(Visitor):\n+ def __init__(self):\n+ self.is_empty = True\n+\n+ def visit_Placeable(self, node):\n+ if isinstance(node.expression, ast.Literal):\n+ if node.expression.parse()[\"value\"]:\n+ self.is_empty = False\n+ elif isinstance(node.expression, ast.SelectExpression):\n+ self.generic_visit(node.expression)\n+ else:\n+ self.is_empty = False\n+\n+ def visit_TextElement(self, node):\n+ if node.value:\n+ self.is_empty = False\n+\n+\n def run_checks(entity, original, string):\n \"\"\"\n Group all checks related to the base UI that get stored in the DB\n@@ -80,4 +99,12 @@\n elif entity_ast.id.name != translation_ast.id.name:\n checks[\"pErrors\"].append(\"Translation key needs to match source string key\")\n \n+ # Empty translation entry warning; set here rather than pontoon_non_db.py\n+ # to avoid needing to parse the Fluent message twice.\n+ else:\n+ visitor = IsEmptyVisitor()\n+ visitor.visit(translation_ast)\n+ if visitor.is_empty:\n+ checks[\"pndbWarnings\"].append(\"Empty translation\")\n+\n return checks\n", "issue": "No warnings when trying to submit empty translations\nI've noticed an increase in the number of empty strings in Firefox, where I have [special checks](https://test.flod.org/checks/).\r\n\r\nApparently, we don't warn anymore when someone tries to submit an empty translation.\n", "before_files": [{"content": "import html\nimport re\n\nimport bleach\n\nfrom collections import defaultdict\nfrom fluent.syntax import FluentParser, ast\n\nfrom pontoon.sync.formats.ftl import localizable_entries\n\n\nMAX_LENGTH_RE = re.compile(r\"MAX_LENGTH:( *)(\\d+)\", re.MULTILINE)\nparser = FluentParser()\n\n\ndef get_max_length(comment):\n \"\"\"\n Return max length value for an entity with MAX_LENTH.\n \"\"\"\n max_length = re.findall(MAX_LENGTH_RE, comment or \"\")\n\n if max_length:\n return int(max_length[0][1])\n\n return None\n\n\ndef run_checks(entity, original, string):\n \"\"\"\n Group all checks related to the base UI that get stored in the DB\n :arg pontoon.base.models.Entity entity: Source entity\n :arg basestring original: an original string\n :arg basestring string: a translation\n \"\"\"\n checks = defaultdict(list)\n resource_ext = entity.resource.format\n\n if resource_ext == \"lang\":\n # Newlines are not allowed in .lang files (bug 1190754)\n if \"\\n\" in string:\n checks[\"pErrors\"].append(\"Newline characters are not allowed\")\n\n # Prevent translations exceeding the given length limit\n max_length = get_max_length(entity.comment)\n\n if max_length:\n string_length = len(\n html.unescape(bleach.clean(string, strip=True, tags=()))\n )\n\n if string_length > max_length:\n checks[\"pErrors\"].append(\"Translation too long\")\n\n # Bug 1599056: Original and translation must either both end in a newline,\n # or none of them should.\n if resource_ext == \"po\":\n if original.endswith(\"\\n\") != string.endswith(\"\\n\"):\n checks[\"pErrors\"].append(\"Ending newline mismatch\")\n\n # Prevent empty translation submissions if not supported\n if string == \"\" and not 
entity.resource.allows_empty_translations:\n checks[\"pErrors\"].append(\"Empty translations are not allowed\")\n\n # FTL checks\n if resource_ext == \"ftl\" and string != \"\":\n translation_ast = parser.parse_entry(string)\n entity_ast = parser.parse_entry(entity.string)\n\n # Parse error\n if isinstance(translation_ast, ast.Junk):\n checks[\"pErrors\"].append(translation_ast.annotations[0].message)\n\n # Not a localizable entry\n elif not isinstance(translation_ast, localizable_entries):\n checks[\"pErrors\"].append(\n \"Translation needs to be a valid localizable entry\"\n )\n\n # Message ID mismatch\n elif entity_ast.id.name != translation_ast.id.name:\n checks[\"pErrors\"].append(\"Translation key needs to match source string key\")\n\n return checks\n", "path": "pontoon/checks/libraries/pontoon_db.py"}]} | 1,354 | 394 |
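The `IsEmptyVisitor` added by this record's patch walks a parsed Fluent entry and flips `is_empty` to `False` on the first non-empty `TextElement` or non-literal placeable. A small usage sketch (the two message sources are invented examples) shows how it distinguishes an empty translation from a real one:

```python
# Usage sketch for the IsEmptyVisitor from the patch above; requires the
# fluent.syntax package. The example messages are made up for illustration.
from fluent.syntax import FluentParser, ast
from fluent.syntax.visitor import Visitor


class IsEmptyVisitor(Visitor):
    def __init__(self):
        self.is_empty = True

    def visit_Placeable(self, node):
        if isinstance(node.expression, ast.Literal):
            if node.expression.parse()["value"]:
                self.is_empty = False
        elif isinstance(node.expression, ast.SelectExpression):
            self.generic_visit(node.expression)
        else:
            self.is_empty = False

    def visit_TextElement(self, node):
        if node.value:
            self.is_empty = False


parser = FluentParser()
for source in ('greeting = Hello', 'greeting = { "" }'):
    visitor = IsEmptyVisitor()
    visitor.visit(parser.parse_entry(source))
    print(source, "->", "empty" if visitor.is_empty else "non-empty")
```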
gh_patches_debug_6668 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-635 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Natsort import error
# Brief Description
The module `natsort` isn't found because it was added (in #627) to the dev requirements but needs to be in the main requirements file. It is imported by `functions.py`, which the package's init script star-imports.
Rather than requiring it, perhaps it could also be brought in with a `try`, `except` per #97
# Error Messages
```
/usr/local/lib/python3.7/site-packages/janitor/functions.py:25: in <module>
from natsort import index_natsorted, natsorted
E ModuleNotFoundError: No module named 'natsort'
```
</issue>
<code>
[start of setup.py]
1 import re
2 from pathlib import Path
3
4 from setuptools import setup
5
6
7 def requirements():
8 with open("requirements.txt", "r+") as f:
9 return f.read()
10
11
12 def generate_long_description() -> str:
13 """
14 Extra chunks from README for PyPI description.
15
16 Target chunks must be contained within `.. pypi-doc` pair comments,
17 so there must be an even number of comments in README.
18
19 :returns: Extracted description from README
20
21 """
22 # Read the contents of README file
23 this_directory = Path(__file__).parent
24 with open(this_directory / "README.rst", encoding="utf-8") as f:
25 readme = f.read()
26
27 # Find pypi-doc comments in README
28 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
29 if len(indices) % 2 != 0:
30 raise Exception("Odd number of `.. pypi-doc` comments in README")
31
32 # Loop through pairs of comments and save text between pairs
33 long_description = ""
34 for i in range(0, len(indices), 2):
35 start_index = indices[i] + 11
36 end_index = indices[i + 1]
37 long_description += readme[start_index:end_index]
38 return long_description
39
40
41 extra_spark = ["pyspark"]
42 extra_biology = ["biopython"]
43 extra_chemistry = ["rdkit"]
44 extra_engineering = ["unyt"]
45 extra_all = extra_biology + extra_engineering + extra_spark
46
47 setup(
48 name="pyjanitor",
49 version="0.20.1",
50 description="Tools for cleaning pandas DataFrames",
51 author="Eric J. Ma",
52 author_email="[email protected]",
53 url="https://github.com/ericmjl/pyjanitor",
54 license="MIT",
55 packages=["janitor"],
56 install_requires=requirements(),
57 extras_require={
58 "all": extra_all,
59 "biology": extra_biology,
60 # "chemistry": extra_chemistry, should be inserted once rdkit
61 # fixes https://github.com/rdkit/rdkit/issues/1812
62 "engineering": extra_engineering,
63 "spark": extra_spark,
64 },
65 python_requires=">=3.6",
66 long_description=generate_long_description(),
67 long_description_content_type="text/x-rst",
68 )
69
[end of setup.py]
[start of janitor/__init__.py]
1 try:
2 import janitor.xarray
3 except ImportError:
4 pass
5
6 from .functions import * # noqa: F403, F401
7 from .math import *
8 from .ml import get_features_targets as _get_features_targets
9 from .utils import refactored_function
10
11 # from .dataframe import JanitorDataFrame as DataFrame # noqa: F401
12 # from .dataframe import JanitorSeries as Series # noqa: F401
13
14
15 @refactored_function(
16 "get_features_targets() has moved. Please use ml.get_features_targets()."
17 )
18 def get_features_targets(*args, **kwargs):
19 return _get_features_targets(*args, **kwargs)
20
21
22 __version__ = "0.20.1"
23
[end of janitor/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/janitor/__init__.py b/janitor/__init__.py
--- a/janitor/__init__.py
+++ b/janitor/__init__.py
@@ -19,4 +19,4 @@
return _get_features_targets(*args, **kwargs)
-__version__ = "0.20.1"
+__version__ = "0.20.2"
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@
setup(
name="pyjanitor",
- version="0.20.1",
+ version="0.20.2",
description="Tools for cleaning pandas DataFrames",
author="Eric J. Ma",
author_email="[email protected]",
| {"golden_diff": "diff --git a/janitor/__init__.py b/janitor/__init__.py\n--- a/janitor/__init__.py\n+++ b/janitor/__init__.py\n@@ -19,4 +19,4 @@\n return _get_features_targets(*args, **kwargs)\n \n \n-__version__ = \"0.20.1\"\n+__version__ = \"0.20.2\"\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -46,7 +46,7 @@\n \n setup(\n name=\"pyjanitor\",\n- version=\"0.20.1\",\n+ version=\"0.20.2\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n", "issue": "Natsort import error\n# Brief Description\r\n\r\nThe module `natsort` isn't found because it was added (in #627) to dev requirements but needs to be in the main requirements file. It is imported with all functions from the init script. \r\n\r\nRather than requiring it, perhaps it could also be brought in with a `try`, `except` per #97 \r\n\r\n# Error Messages\r\n\r\n```\r\n /usr/local/lib/python3.7/site-packages/janitor/functions.py:25: in <module>\r\n from natsort import index_natsorted, natsorted\r\n E ModuleNotFoundError: No module named 'natsort'\r\n```\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nextra_spark = [\"pyspark\"]\nextra_biology = [\"biopython\"]\nextra_chemistry = [\"rdkit\"]\nextra_engineering = [\"unyt\"]\nextra_all = extra_biology + extra_engineering + extra_spark\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.20.1\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. 
Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n license=\"MIT\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n extras_require={\n \"all\": extra_all,\n \"biology\": extra_biology,\n # \"chemistry\": extra_chemistry, should be inserted once rdkit\n # fixes https://github.com/rdkit/rdkit/issues/1812\n \"engineering\": extra_engineering,\n \"spark\": extra_spark,\n },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}, {"content": "try:\n import janitor.xarray\nexcept ImportError:\n pass\n\nfrom .functions import * # noqa: F403, F401\nfrom .math import *\nfrom .ml import get_features_targets as _get_features_targets\nfrom .utils import refactored_function\n\n# from .dataframe import JanitorDataFrame as DataFrame # noqa: F401\n# from .dataframe import JanitorSeries as Series # noqa: F401\n\n\n@refactored_function(\n \"get_features_targets() has moved. Please use ml.get_features_targets().\"\n)\ndef get_features_targets(*args, **kwargs):\n return _get_features_targets(*args, **kwargs)\n\n\n__version__ = \"0.20.1\"\n", "path": "janitor/__init__.py"}]} | 1,532 | 184 |
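The merged diff for this record only bumps the version strings; the issue text itself suggests the alternative of guarding the import the way pyjanitor handles other optional dependencies (`try`/`except`, per #97). A sketch of that guard, assuming the same `natsort` names used in `functions.py` — the helper name and message wording here are ours, not pyjanitor's:

```python
# Sketch of the optional-import guard floated in the issue; the helper name
# and error message wording are assumptions for illustration.
try:
    from natsort import index_natsorted, natsorted
except ImportError:  # natsort not installed
    index_natsorted = None
    natsorted = None


def _require_natsort():
    # Call this at the top of any function that actually needs natural sorting.
    if natsorted is None or index_natsorted is None:
        raise ImportError(
            "natsort is required for this functionality; "
            "install it with `pip install natsort`."
        )
```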
gh_patches_debug_13846 | rasdani/github-patches | git_diff | wagtail__wagtail-7122 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Hallo editor does not work in StructBlock
I upgraded an old Wagtail project to test Wagtail 2.13rc1 and found that text is not rendered into the Hallo.js rich text editor if the editor is part of a StructBlock. The editor behaves just fine if it is used in its own RichTextField.
### Steps to Reproduce
I created a minimal repository to demonstrate this issue: https://github.com/cnk/hallo-issue. I have included the sqlite database in the repository so you should be able to see this in action by:
1. check out https://github.com/cnk/hallo-issue
2. Activate a virtual environment and `pip install -r requirements.txt`
3. `./manage.py createsuperuser`
4. `./manage.py runserver`
### Technical details
* Python version: I used 3.7.7
* Django version: 3.2
* Wagtail version: 2.13rc1
* Browser version: I tried it with Firefox and Chrome.
</issue>
<code>
[start of wagtail/admin/rich_text/editors/hallo.py]
1 import json
2
3 from collections import OrderedDict
4
5 from django.forms import Media, widgets
6 from django.utils.functional import cached_property
7
8 from wagtail.admin.edit_handlers import RichTextFieldPanel
9 from wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter
10 from wagtail.admin.staticfiles import versioned_static
11 from wagtail.core.rich_text import features
12
13
14 class HalloPlugin:
15 def __init__(self, **kwargs):
16 self.name = kwargs.get('name', None)
17 self.options = kwargs.get('options', {})
18 self.js = kwargs.get('js', [])
19 self.css = kwargs.get('css', {})
20 self.order = kwargs.get('order', 100)
21
22 def construct_plugins_list(self, plugins):
23 if self.name is not None:
24 plugins[self.name] = self.options
25
26 @property
27 def media(self):
28 js = [versioned_static(js_file) for js_file in self.js]
29 css = {}
30 for media_type, css_files in self.css.items():
31 css[media_type] = [versioned_static(css_file) for css_file in css_files]
32
33 return Media(js=js, css=css)
34
35
36 class HalloFormatPlugin(HalloPlugin):
37 def __init__(self, **kwargs):
38 kwargs.setdefault('name', 'halloformat')
39 kwargs.setdefault('order', 10)
40 self.format_name = kwargs['format_name']
41 super().__init__(**kwargs)
42
43 def construct_plugins_list(self, plugins):
44 plugins.setdefault(self.name, {'formattings': {
45 'bold': False, 'italic': False, 'strikeThrough': False, 'underline': False
46 }})
47 plugins[self.name]['formattings'][self.format_name] = True
48
49
50 class HalloHeadingPlugin(HalloPlugin):
51 default_order = 20
52
53 def __init__(self, **kwargs):
54 kwargs.setdefault('name', 'halloheadings')
55 kwargs.setdefault('order', self.default_order)
56 self.element = kwargs.pop('element')
57 super().__init__(**kwargs)
58
59 def construct_plugins_list(self, plugins):
60 plugins.setdefault(self.name, {'formatBlocks': []})
61 plugins[self.name]['formatBlocks'].append(self.element)
62
63
64 class HalloListPlugin(HalloPlugin):
65 def __init__(self, **kwargs):
66 kwargs.setdefault('name', 'hallolists')
67 kwargs.setdefault('order', 40)
68 self.list_type = kwargs['list_type']
69 super().__init__(**kwargs)
70
71 def construct_plugins_list(self, plugins):
72 plugins.setdefault(self.name, {'lists': {
73 'ordered': False, 'unordered': False
74 }})
75 plugins[self.name]['lists'][self.list_type] = True
76
77
78 class HalloRequireParagraphsPlugin(HalloPlugin):
79 @property
80 def media(self):
81 return Media(js=[
82 versioned_static('wagtailadmin/js/hallo-plugins/hallo-requireparagraphs.js'),
83 ]) + super().media
84
85
86 # Plugins which are always imported, and cannot be enabled/disabled via 'features'
87 CORE_HALLO_PLUGINS = [
88 HalloPlugin(name='halloreundo', order=50),
89 HalloRequireParagraphsPlugin(name='hallorequireparagraphs'),
90 HalloHeadingPlugin(element='p')
91 ]
92
93
94 class HalloRichTextArea(widgets.Textarea):
95 template_name = 'wagtailadmin/widgets/hallo_rich_text_area.html'
96
97 # this class's constructor accepts a 'features' kwarg
98 accepts_features = True
99
100 def get_panel(self):
101 return RichTextFieldPanel
102
103 def __init__(self, *args, **kwargs):
104 self.options = kwargs.pop('options', None)
105
106 self.features = kwargs.pop('features', None)
107 if self.features is None:
108 self.features = features.get_default_features()
109
110 self.converter = EditorHTMLConverter(self.features)
111
112 # construct a list of plugin objects, by querying the feature registry
113 # and keeping the non-null responses from get_editor_plugin
114 self.plugins = CORE_HALLO_PLUGINS + list(filter(None, [
115 features.get_editor_plugin('hallo', feature_name)
116 for feature_name in self.features
117 ]))
118 self.plugins.sort(key=lambda plugin: plugin.order)
119
120 super().__init__(*args, **kwargs)
121
122 def format_value(self, value):
123 # Convert database rich text representation to the format required by
124 # the input field
125 value = super().format_value(value)
126
127 if value is None:
128 return None
129
130 return self.converter.from_database_format(value)
131
132 def get_context(self, name, value, attrs):
133 context = super().get_context(name, value, attrs)
134
135 if self.options is not None and 'plugins' in self.options:
136 # explicit 'plugins' config passed in options, so use that
137 plugin_data = self.options['plugins']
138 else:
139 plugin_data = OrderedDict()
140 for plugin in self.plugins:
141 plugin.construct_plugins_list(plugin_data)
142 context['widget']['plugins_json'] = json.dumps(plugin_data)
143
144 return context
145
146 def value_from_datadict(self, data, files, name):
147 original_value = super().value_from_datadict(data, files, name)
148 if original_value is None:
149 return None
150 return self.converter.to_database_format(original_value)
151
152 @cached_property
153 def media(self):
154 media = Media(js=[
155 versioned_static('wagtailadmin/js/vendor/hallo.js'),
156 versioned_static('wagtailadmin/js/hallo-bootstrap.js'),
157 ], css={
158 'all': [versioned_static('wagtailadmin/css/panels/hallo.css')]
159 })
160
161 for plugin in self.plugins:
162 media += plugin.media
163
164 return media
165
[end of wagtail/admin/rich_text/editors/hallo.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/admin/rich_text/editors/hallo.py b/wagtail/admin/rich_text/editors/hallo.py
--- a/wagtail/admin/rich_text/editors/hallo.py
+++ b/wagtail/admin/rich_text/editors/hallo.py
@@ -9,6 +9,8 @@
from wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter
from wagtail.admin.staticfiles import versioned_static
from wagtail.core.rich_text import features
+from wagtail.core.telepath import register
+from wagtail.core.widget_adapters import WidgetAdapter
class HalloPlugin:
@@ -162,3 +164,10 @@
media += plugin.media
return media
+
+
+class HalloRichTextAreaAdapter(WidgetAdapter):
+ js_constructor = 'wagtail.widgets.HalloRichTextArea'
+
+
+register(HalloRichTextAreaAdapter(), HalloRichTextArea)
| {"golden_diff": "diff --git a/wagtail/admin/rich_text/editors/hallo.py b/wagtail/admin/rich_text/editors/hallo.py\n--- a/wagtail/admin/rich_text/editors/hallo.py\n+++ b/wagtail/admin/rich_text/editors/hallo.py\n@@ -9,6 +9,8 @@\n from wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter\n from wagtail.admin.staticfiles import versioned_static\n from wagtail.core.rich_text import features\n+from wagtail.core.telepath import register\n+from wagtail.core.widget_adapters import WidgetAdapter\n \n \n class HalloPlugin:\n@@ -162,3 +164,10 @@\n media += plugin.media\n \n return media\n+\n+\n+class HalloRichTextAreaAdapter(WidgetAdapter):\n+ js_constructor = 'wagtail.widgets.HalloRichTextArea'\n+\n+\n+register(HalloRichTextAreaAdapter(), HalloRichTextArea)\n", "issue": "Hallo editor does not work in StructBlock\nI upgraded an old Wagtail project to test Wagtail 2.13rc1 and found that text is not rendered into the Hallo.js rich text editor if the editor is part of a StructBlock. The editor behaves just fine if it is used in its own RichTextField. \r\n\r\n### Steps to Reproduce\r\n\r\nI created a minimal repository to demostrate this issue: https://github.com/cnk/hallo-issue I have included the sqlite database in the repository so you should be able to see this in action by: \r\n\r\n1. check out https://github.com/cnk/hallo-issue\r\n2. Activate a virtual environment and `pip install -r requirements.txt`\r\n3. `./manage.py createsuperuser`\r\n4. `./manage.py runserver`\r\n\r\n### Technical details\r\n\r\n* Python version: I used 3.7.7\r\n* Django version: 3.2\r\n* Wagtail version: 2.13rc1\r\n* Browser version: I tried it with Firefox and Chrome.\r\n\n", "before_files": [{"content": "import json\n\nfrom collections import OrderedDict\n\nfrom django.forms import Media, widgets\nfrom django.utils.functional import cached_property\n\nfrom wagtail.admin.edit_handlers import RichTextFieldPanel\nfrom wagtail.admin.rich_text.converters.editor_html import EditorHTMLConverter\nfrom wagtail.admin.staticfiles import versioned_static\nfrom wagtail.core.rich_text import features\n\n\nclass HalloPlugin:\n def __init__(self, **kwargs):\n self.name = kwargs.get('name', None)\n self.options = kwargs.get('options', {})\n self.js = kwargs.get('js', [])\n self.css = kwargs.get('css', {})\n self.order = kwargs.get('order', 100)\n\n def construct_plugins_list(self, plugins):\n if self.name is not None:\n plugins[self.name] = self.options\n\n @property\n def media(self):\n js = [versioned_static(js_file) for js_file in self.js]\n css = {}\n for media_type, css_files in self.css.items():\n css[media_type] = [versioned_static(css_file) for css_file in css_files]\n\n return Media(js=js, css=css)\n\n\nclass HalloFormatPlugin(HalloPlugin):\n def __init__(self, **kwargs):\n kwargs.setdefault('name', 'halloformat')\n kwargs.setdefault('order', 10)\n self.format_name = kwargs['format_name']\n super().__init__(**kwargs)\n\n def construct_plugins_list(self, plugins):\n plugins.setdefault(self.name, {'formattings': {\n 'bold': False, 'italic': False, 'strikeThrough': False, 'underline': False\n }})\n plugins[self.name]['formattings'][self.format_name] = True\n\n\nclass HalloHeadingPlugin(HalloPlugin):\n default_order = 20\n\n def __init__(self, **kwargs):\n kwargs.setdefault('name', 'halloheadings')\n kwargs.setdefault('order', self.default_order)\n self.element = kwargs.pop('element')\n super().__init__(**kwargs)\n\n def construct_plugins_list(self, plugins):\n plugins.setdefault(self.name, {'formatBlocks': 
[]})\n plugins[self.name]['formatBlocks'].append(self.element)\n\n\nclass HalloListPlugin(HalloPlugin):\n def __init__(self, **kwargs):\n kwargs.setdefault('name', 'hallolists')\n kwargs.setdefault('order', 40)\n self.list_type = kwargs['list_type']\n super().__init__(**kwargs)\n\n def construct_plugins_list(self, plugins):\n plugins.setdefault(self.name, {'lists': {\n 'ordered': False, 'unordered': False\n }})\n plugins[self.name]['lists'][self.list_type] = True\n\n\nclass HalloRequireParagraphsPlugin(HalloPlugin):\n @property\n def media(self):\n return Media(js=[\n versioned_static('wagtailadmin/js/hallo-plugins/hallo-requireparagraphs.js'),\n ]) + super().media\n\n\n# Plugins which are always imported, and cannot be enabled/disabled via 'features'\nCORE_HALLO_PLUGINS = [\n HalloPlugin(name='halloreundo', order=50),\n HalloRequireParagraphsPlugin(name='hallorequireparagraphs'),\n HalloHeadingPlugin(element='p')\n]\n\n\nclass HalloRichTextArea(widgets.Textarea):\n template_name = 'wagtailadmin/widgets/hallo_rich_text_area.html'\n\n # this class's constructor accepts a 'features' kwarg\n accepts_features = True\n\n def get_panel(self):\n return RichTextFieldPanel\n\n def __init__(self, *args, **kwargs):\n self.options = kwargs.pop('options', None)\n\n self.features = kwargs.pop('features', None)\n if self.features is None:\n self.features = features.get_default_features()\n\n self.converter = EditorHTMLConverter(self.features)\n\n # construct a list of plugin objects, by querying the feature registry\n # and keeping the non-null responses from get_editor_plugin\n self.plugins = CORE_HALLO_PLUGINS + list(filter(None, [\n features.get_editor_plugin('hallo', feature_name)\n for feature_name in self.features\n ]))\n self.plugins.sort(key=lambda plugin: plugin.order)\n\n super().__init__(*args, **kwargs)\n\n def format_value(self, value):\n # Convert database rich text representation to the format required by\n # the input field\n value = super().format_value(value)\n\n if value is None:\n return None\n\n return self.converter.from_database_format(value)\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n\n if self.options is not None and 'plugins' in self.options:\n # explicit 'plugins' config passed in options, so use that\n plugin_data = self.options['plugins']\n else:\n plugin_data = OrderedDict()\n for plugin in self.plugins:\n plugin.construct_plugins_list(plugin_data)\n context['widget']['plugins_json'] = json.dumps(plugin_data)\n\n return context\n\n def value_from_datadict(self, data, files, name):\n original_value = super().value_from_datadict(data, files, name)\n if original_value is None:\n return None\n return self.converter.to_database_format(original_value)\n\n @cached_property\n def media(self):\n media = Media(js=[\n versioned_static('wagtailadmin/js/vendor/hallo.js'),\n versioned_static('wagtailadmin/js/hallo-bootstrap.js'),\n ], css={\n 'all': [versioned_static('wagtailadmin/css/panels/hallo.css')]\n })\n\n for plugin in self.plugins:\n media += plugin.media\n\n return media\n", "path": "wagtail/admin/rich_text/editors/hallo.py"}]} | 2,387 | 203 |
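The fix for this record registers a telepath `WidgetAdapter` so Wagtail 2.13's client-side StreamField code can rebuild the Hallo widget inside a `StructBlock`. The same pattern applies to any custom form widget that stops rendering in StreamField after the 2.13 upgrade; in the sketch below, `MyCustomTextArea` and the `js_constructor` string are placeholders for a project-specific widget and its client-side class, not real Wagtail names, and the code assumes it runs inside a Wagtail 2.13 project:

```python
# Sketch of the telepath adapter registration pattern from the patch above.
# MyCustomTextArea and "myproject.widgets.MyCustomTextArea" are hypothetical.
from django.forms import widgets

from wagtail.core.telepath import register
from wagtail.core.widget_adapters import WidgetAdapter


class MyCustomTextArea(widgets.Textarea):
    """Stand-in for a project-specific rich text widget."""


class MyCustomTextAreaAdapter(WidgetAdapter):
    # Should correspond to the name the matching JS class is registered
    # under on the client side.
    js_constructor = "myproject.widgets.MyCustomTextArea"


register(MyCustomTextAreaAdapter(), MyCustomTextArea)
```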
gh_patches_debug_23123 | rasdani/github-patches | git_diff | streamlink__streamlink-5762 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
plugins.vidio: 403 Client Error on stream token acquirement
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
Unable to open URL: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)
### Description
The live stream: https://www.vidio.com/live/204-sctv
The output: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)
The token URL is missing the `-sctv` slug that is present in the stream URL.
### Debug log
```text
streamlink https://www.vidio.com/live/204-sctv best
[cli][info] Found matching plugin vidio for URL https://www.vidio.com/live/204-sctv
error: Unable to open URL: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)
```
</issue>
<code>
[start of src/streamlink/plugins/vidio.py]
1 """
2 $description Indonesian & international live TV channels and video on-demand service. OTT service from Vidio.
3 $url vidio.com
4 $type live, vod
5 """
6 import logging
7 import re
8 from urllib.parse import urlsplit, urlunsplit
9
10 from streamlink.plugin import Plugin, pluginmatcher
11 from streamlink.plugin.api import validate
12 from streamlink.stream.dash import DASHStream
13 from streamlink.stream.hls import HLSStream
14
15
16 log = logging.getLogger(__name__)
17
18
19 @pluginmatcher(re.compile(
20 r"https?://(?:www\.)?vidio\.com/",
21 ))
22 class Vidio(Plugin):
23 tokens_url = "https://www.vidio.com/live/{id}/tokens"
24
25 def _get_stream_token(self, stream_id, stream_type):
26 log.debug("Getting stream token")
27 return self.session.http.post(
28 self.tokens_url.format(id=stream_id),
29 params={"type": stream_type},
30 headers={"Referer": self.url},
31 schema=validate.Schema(
32 validate.parse_json(),
33 {"token": str},
34 validate.get("token"),
35 ),
36 )
37
38 def _get_streams(self):
39 stream_id, has_token, hls_url, dash_url = self.session.http.get(
40 self.url,
41 schema=validate.Schema(
42 validate.parse_html(),
43 validate.xml_find(".//*[@data-video-id]"),
44 validate.union((
45 validate.get("data-video-id"),
46 validate.all(
47 validate.get("data-video-has-token"),
48 validate.transform(lambda val: val and val != "false"),
49 ),
50 validate.get("data-vjs-clip-hls-url"),
51 validate.get("data-vjs-clip-dash-url"),
52 )),
53 ),
54 )
55
56 if dash_url and has_token:
57 token = self._get_stream_token(stream_id, "dash")
58 parsed = urlsplit(dash_url)
59 dash_url = urlunsplit(parsed._replace(path=f"{token}{parsed.path}"))
60 return DASHStream.parse_manifest(
61 self.session,
62 dash_url,
63 headers={"Referer": "https://www.vidio.com/"},
64 )
65
66 if not hls_url:
67 return
68
69 if has_token:
70 token = self._get_stream_token(stream_id, "hls")
71 hls_url = f"{hls_url}?{token}"
72
73 return HLSStream.parse_variant_playlist(
74 self.session,
75 hls_url,
76 headers={"Referer": "https://www.vidio.com/"},
77 )
78
79
80 __plugin__ = Vidio
81
[end of src/streamlink/plugins/vidio.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/streamlink/plugins/vidio.py b/src/streamlink/plugins/vidio.py
--- a/src/streamlink/plugins/vidio.py
+++ b/src/streamlink/plugins/vidio.py
@@ -6,6 +6,7 @@
import logging
import re
from urllib.parse import urlsplit, urlunsplit
+from uuid import uuid4
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
@@ -17,7 +18,7 @@
@pluginmatcher(re.compile(
- r"https?://(?:www\.)?vidio\.com/",
+ r"https?://(?:www\.)?vidio\.com/.+",
))
class Vidio(Plugin):
tokens_url = "https://www.vidio.com/live/{id}/tokens"
@@ -28,6 +29,10 @@
self.tokens_url.format(id=stream_id),
params={"type": stream_type},
headers={"Referer": self.url},
+ cookies={
+ "ahoy_visit": str(uuid4()),
+ "ahoy_visitor": str(uuid4()),
+ },
schema=validate.Schema(
validate.parse_json(),
{"token": str},
| {"golden_diff": "diff --git a/src/streamlink/plugins/vidio.py b/src/streamlink/plugins/vidio.py\n--- a/src/streamlink/plugins/vidio.py\n+++ b/src/streamlink/plugins/vidio.py\n@@ -6,6 +6,7 @@\n import logging\n import re\n from urllib.parse import urlsplit, urlunsplit\n+from uuid import uuid4\n \n from streamlink.plugin import Plugin, pluginmatcher\n from streamlink.plugin.api import validate\n@@ -17,7 +18,7 @@\n \n \n @pluginmatcher(re.compile(\n- r\"https?://(?:www\\.)?vidio\\.com/\",\n+ r\"https?://(?:www\\.)?vidio\\.com/.+\",\n ))\n class Vidio(Plugin):\n tokens_url = \"https://www.vidio.com/live/{id}/tokens\"\n@@ -28,6 +29,10 @@\n self.tokens_url.format(id=stream_id),\n params={\"type\": stream_type},\n headers={\"Referer\": self.url},\n+ cookies={\n+ \"ahoy_visit\": str(uuid4()),\n+ \"ahoy_visitor\": str(uuid4()),\n+ },\n schema=validate.Schema(\n validate.parse_json(),\n {\"token\": str},\n", "issue": "plugins.vidio: 403 Client Error on stream token acquirement\n### Checklist\n\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\n\n### Streamlink version\n\nUnable to open URL: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)\n\n### Description\n\nThe live stream: https://www.vidio.com/live/204-sctv\r\nthe output: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)\r\n\r\nIt is missing sctv\n\n### Debug log\n\n```text\nstreamlink https://www.vidio.com/live/204-sctv best\r\n[cli][info] Found matching plugin vidio for URL https://www.vidio.com/live/204-sctv\r\nerror: Unable to open URL: https://www.vidio.com/live/204/tokens (403 Client Error: Forbidden for url: https://www.vidio.com/live/204/tokens?type=hls)\n```\n\n", "before_files": [{"content": "\"\"\"\n$description Indonesian & international live TV channels and video on-demand service. 
OTT service from Vidio.\n$url vidio.com\n$type live, vod\n\"\"\"\nimport logging\nimport re\nfrom urllib.parse import urlsplit, urlunsplit\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.dash import DASHStream\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(\n r\"https?://(?:www\\.)?vidio\\.com/\",\n))\nclass Vidio(Plugin):\n tokens_url = \"https://www.vidio.com/live/{id}/tokens\"\n\n def _get_stream_token(self, stream_id, stream_type):\n log.debug(\"Getting stream token\")\n return self.session.http.post(\n self.tokens_url.format(id=stream_id),\n params={\"type\": stream_type},\n headers={\"Referer\": self.url},\n schema=validate.Schema(\n validate.parse_json(),\n {\"token\": str},\n validate.get(\"token\"),\n ),\n )\n\n def _get_streams(self):\n stream_id, has_token, hls_url, dash_url = self.session.http.get(\n self.url,\n schema=validate.Schema(\n validate.parse_html(),\n validate.xml_find(\".//*[@data-video-id]\"),\n validate.union((\n validate.get(\"data-video-id\"),\n validate.all(\n validate.get(\"data-video-has-token\"),\n validate.transform(lambda val: val and val != \"false\"),\n ),\n validate.get(\"data-vjs-clip-hls-url\"),\n validate.get(\"data-vjs-clip-dash-url\"),\n )),\n ),\n )\n\n if dash_url and has_token:\n token = self._get_stream_token(stream_id, \"dash\")\n parsed = urlsplit(dash_url)\n dash_url = urlunsplit(parsed._replace(path=f\"{token}{parsed.path}\"))\n return DASHStream.parse_manifest(\n self.session,\n dash_url,\n headers={\"Referer\": \"https://www.vidio.com/\"},\n )\n\n if not hls_url:\n return\n\n if has_token:\n token = self._get_stream_token(stream_id, \"hls\")\n hls_url = f\"{hls_url}?{token}\"\n\n return HLSStream.parse_variant_playlist(\n self.session,\n hls_url,\n headers={\"Referer\": \"https://www.vidio.com/\"},\n )\n\n\n__plugin__ = Vidio\n", "path": "src/streamlink/plugins/vidio.py"}]} | 1,639 | 260 |
gh_patches_debug_19732 | rasdani/github-patches | git_diff | joke2k__faker-1368 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Random sentence of length 1 fails
* Faker version: 5.5.0
* OS: Ubuntu 20.04.1
Creating a random sentence of length 1 fails because it attempts to titleize the first word of the sentence, but `random_choices` returns a tuple, which is immutable.
This also occurs sometimes with sentences of length 2 or 3, presumably because the randomized word count can still resolve to a single word under the hood.
This works as expected in Faker version 5.4.1.
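For illustration, a minimal, Faker-free sketch of the failure described above and of the list conversion that avoids it (the tuple below only stands in for what `random_choices` is reported to return):
```python
# Stand-in for the tuple that random_choices() is reported to return.
words = ("lorem",)

try:
    words[0] = words[0].title()
except TypeError as exc:
    print(exc)  # 'tuple' object does not support item assignment

# Converting to a list first makes the first word assignable again.
words = list(words)
words[0] = words[0].title()
print(" ".join(words) + ".")  # Lorem.
```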
### Steps to reproduce
```
>>> import faker
>>> thing = faker.Faker()
>>> print(thing.sentence(1))
```
### Expected behavior
A random titleized word is printed with a fullstop
### Actual behavior
```
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "~/.../venv/lib/python3.6/site-packages/faker/providers/lorem/__init__.py", line 86, in sentence
words[0] = words[0].title()
TypeError: 'tuple' object does not support item assignment
```
</issue>
<code>
[start of faker/providers/lorem/__init__.py]
1 from .. import BaseProvider
2
3 localized = True
4
5 # 'Latin' is the default locale
6 default_locale = 'la'
7
8
9 class Provider(BaseProvider):
10 """Implement default lorem provider for Faker.
11
12 .. important::
13 The default locale of the lorem provider is ``la``. When using a locale
14 without a localized lorem provider, the ``la`` lorem provider will be
15 used, so generated words will be in pseudo-Latin. The locale used for
16 the standard provider docs was ``en_US``, and ``en_US`` has a localized
17 lorem provider which is why the samples here show words in American
18 English.
19 """
20
21 word_connector = ' '
22 sentence_punctuation = '.'
23
24 def words(self, nb=3, ext_word_list=None, unique=False):
25 """Generate a list of words.
26
27 The ``nb`` argument controls the number of words in the resulting list,
28 and if ``ext_word_list`` is provided, words from that list will be used
29 instead of those from the locale provider's built-in word list.
30
31 If ``unique`` is ``True``, this method will return a list containing
32 unique words. Under the hood, |random_sample| will be used for sampling
33 without replacement. If ``unique`` is ``False``, |random_choices| is
34 used instead, and the list returned may contain duplicates.
35
36 .. warning::
37 Depending on the length of a locale provider's built-in word list or
38 on the length of ``ext_word_list`` if provided, a large ``nb`` can
39 exhaust said lists if ``unique`` is ``True``, raising an exception.
40
41 :sample:
42 :sample: nb=5
43 :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
44 :sample: nb=4, ext_word_list=['abc', 'def', 'ghi', 'jkl'], unique=True
45 """
46 word_list = ext_word_list if ext_word_list else self.word_list
47 if unique:
48 return self.random_sample(word_list, length=nb)
49 return self.random_choices(word_list, length=nb)
50
51 def word(self, ext_word_list=None):
52 """Generate a word.
53
54 This method uses |words| under the hood with the ``nb`` argument set to
55 ``1`` to generate the result.
56
57 :sample:
58 :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']
59 """
60 return self.words(1, ext_word_list)[0]
61
62 def sentence(self, nb_words=6, variable_nb_words=True, ext_word_list=None):
63 """Generate a sentence.
64
65 The ``nb_words`` argument controls how many words the sentence will
66 contain, and setting ``variable_nb_words`` to ``False`` will generate
67 the exact amount, while setting it to ``True`` (default) will generate
68 a random amount (+/-40%, minimum of 1) using |randomize_nb_elements|.
69
70 Under the hood, |words| is used to generate the words, so the argument
71 ``ext_word_list`` works in the same way here as it would in that method.
72
73 :sample: nb_words=10
74 :sample: nb_words=10, variable_nb_words=False
75 :sample: nb_words=10, ext_word_list=['abc', 'def', 'ghi', 'jkl']
76 :sample: nb_words=10, variable_nb_words=True,
77 ext_word_list=['abc', 'def', 'ghi', 'jkl']
78 """
79 if nb_words <= 0:
80 return ''
81
82 if variable_nb_words:
83 nb_words = self.randomize_nb_elements(nb_words, min=1)
84
85 words = self.words(nb=nb_words, ext_word_list=ext_word_list)
86 words[0] = words[0].title()
87
88 return self.word_connector.join(words) + self.sentence_punctuation
89
90 def sentences(self, nb=3, ext_word_list=None):
91 """Generate a list of sentences.
92
93 This method uses |sentence| under the hood to generate sentences, and
94 the ``nb`` argument controls exactly how many sentences the list will
95 contain. The ``ext_word_list`` argument works in exactly the same way
96 as well.
97
98 :sample:
99 :sample: nb=5
100 :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
101 """
102 return [self.sentence(ext_word_list=ext_word_list)
103 for _ in range(0, nb)]
104
105 def paragraph(
106 self,
107 nb_sentences=3,
108 variable_nb_sentences=True,
109 ext_word_list=None):
110 """Generate a paragraph.
111
112 The ``nb_sentences`` argument controls how many sentences the paragraph
113 will contain, and setting ``variable_nb_sentences`` to ``False`` will
114 generate the exact amount, while setting it to ``True`` (default) will
115 generate a random amount (+/-40%, minimum of 1) using
116 |randomize_nb_elements|.
117
118 Under the hood, |sentences| is used to generate the sentences, so the
119 argument ``ext_word_list`` works in the same way here as it would in
120 that method.
121
122 :sample: nb_sentences=5
123 :sample: nb_sentences=5, variable_nb_sentences=False
124 :sample: nb_sentences=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
125 :sample: nb_sentences=5, variable_nb_sentences=False,
126 ext_word_list=['abc', 'def', 'ghi', 'jkl']
127 """
128 if nb_sentences <= 0:
129 return ''
130
131 if variable_nb_sentences:
132 nb_sentences = self.randomize_nb_elements(nb_sentences, min=1)
133
134 para = self.word_connector.join(self.sentences(
135 nb_sentences, ext_word_list=ext_word_list,
136 ))
137
138 return para
139
140 def paragraphs(self, nb=3, ext_word_list=None):
141 """Generate a list of paragraphs.
142
143 This method uses |paragraph| under the hood to generate paragraphs, and
144 the ``nb`` argument controls exactly how many sentences the list will
145 contain. The ``ext_word_list`` argument works in exactly the same way
146 as well.
147
148 :sample: nb=5
149 :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']
150 """
151 return [self.paragraph(ext_word_list=ext_word_list)
152 for _ in range(0, nb)]
153
154 def text(self, max_nb_chars=200, ext_word_list=None):
155 """Generate a text string.
156
157 The ``max_nb_chars`` argument controls the approximate number of
158 characters the text string will have, and depending on its value, this
159 method may use either |words|, |sentences|, or |paragraphs| for text
160 generation. The ``ext_word_list`` argument works in exactly the same way
161 it would in any of those methods.
162
163 :sample: max_nb_chars=20
164 :sample: max_nb_chars=80
165 :sample: max_nb_chars=160
166 :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']
167 """
168 text = []
169 if max_nb_chars < 5:
170 raise ValueError(
171 'text() can only generate text of at least 5 characters')
172
173 if max_nb_chars < 25:
174 # join words
175 while not text:
176 size = 0
177 # determine how many words are needed to reach the $max_nb_chars
178 # once;
179 while size < max_nb_chars:
180 word = (self.word_connector if size else '') + \
181 self.word(ext_word_list=ext_word_list)
182 text.append(word)
183 size += len(word)
184 text.pop()
185 text[0] = text[0][0].upper() + text[0][1:]
186 last_index = len(text) - 1
187 text[last_index] += self.sentence_punctuation
188 elif max_nb_chars < 100:
189 # join sentences
190 while not text:
191 size = 0
192 # determine how many sentences are needed to reach the
193 # $max_nb_chars once
194 while size < max_nb_chars:
195 sentence = (self.word_connector if size else '') + \
196 self.sentence(ext_word_list=ext_word_list)
197 text.append(sentence)
198 size += len(sentence)
199 text.pop()
200 else:
201 # join paragraphs
202 while not text:
203 size = 0
204 # determine how many paragraphs are needed to reach the
205 # $max_nb_chars once
206 while size < max_nb_chars:
207 paragraph = ('\n' if size else '') + \
208 self.paragraph(ext_word_list=ext_word_list)
209 text.append(paragraph)
210 size += len(paragraph)
211 text.pop()
212
213 return "".join(text)
214
215 def texts(self, nb_texts=3, max_nb_chars=200, ext_word_list=None):
216 """Generate a list of text strings.
217
218 The ``nb_texts`` argument controls how many text strings the list will
219 contain, and this method uses |text| under the hood for text generation,
220 so the two remaining arguments, ``max_nb_chars`` and ``ext_word_list``
221 will work in exactly the same way as well.
222
223 :sample: nb_texts=5
224 :sample: nb_texts=5, max_nb_chars=50
225 :sample: nb_texts=5, max_nb_chars=50,
226 ext_word_list=['abc', 'def', 'ghi', 'jkl']
227 """
228 return [self.text(max_nb_chars, ext_word_list)
229 for _ in range(0, nb_texts)]
230
[end of faker/providers/lorem/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/faker/providers/lorem/__init__.py b/faker/providers/lorem/__init__.py
--- a/faker/providers/lorem/__init__.py
+++ b/faker/providers/lorem/__init__.py
@@ -22,7 +22,7 @@
sentence_punctuation = '.'
def words(self, nb=3, ext_word_list=None, unique=False):
- """Generate a list of words.
+ """Generate a tuple of words.
The ``nb`` argument controls the number of words in the resulting list,
and if ``ext_word_list`` is provided, words from that list will be used
@@ -82,7 +82,7 @@
if variable_nb_words:
nb_words = self.randomize_nb_elements(nb_words, min=1)
- words = self.words(nb=nb_words, ext_word_list=ext_word_list)
+ words = list(self.words(nb=nb_words, ext_word_list=ext_word_list))
words[0] = words[0].title()
return self.word_connector.join(words) + self.sentence_punctuation
| {"golden_diff": "diff --git a/faker/providers/lorem/__init__.py b/faker/providers/lorem/__init__.py\n--- a/faker/providers/lorem/__init__.py\n+++ b/faker/providers/lorem/__init__.py\n@@ -22,7 +22,7 @@\n sentence_punctuation = '.'\n \n def words(self, nb=3, ext_word_list=None, unique=False):\n- \"\"\"Generate a list of words.\n+ \"\"\"Generate a tuple of words.\n \n The ``nb`` argument controls the number of words in the resulting list,\n and if ``ext_word_list`` is provided, words from that list will be used\n@@ -82,7 +82,7 @@\n if variable_nb_words:\n nb_words = self.randomize_nb_elements(nb_words, min=1)\n \n- words = self.words(nb=nb_words, ext_word_list=ext_word_list)\n+ words = list(self.words(nb=nb_words, ext_word_list=ext_word_list))\n words[0] = words[0].title()\n \n return self.word_connector.join(words) + self.sentence_punctuation\n", "issue": "Random sentence of length 1 fails\n* Faker version: 5.5.0\r\n* OS: Ubuntu 20.04.1\r\n\r\nCreating a random sentence of length 1 fails because it attempts to titleize the first word of the sentence, but `random_choices` is returning a tuple which is immutable. \r\n\r\nThis also occurs sometimes with 2 or 3 length sentences, although I presume that under the hood they are randomly selecting sentences of length 1.\r\n\r\nThis works as expected in Faker version 5.4.1.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\n>>> import faker\r\n>>> thing = faker.Faker()\r\n>>> print(thing.sentence(1))\r\n```\r\n\r\n### Expected behavior\r\n\r\nA random titleized word is printed with a fullstop\r\n\r\n### Actual behavior\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<input>\", line 1, in <module>\r\n File \"~/.../venv/lib/python3.6/site-packages/faker/providers/lorem/__init__.py\", line 86, in sentence\r\n words[0] = words[0].title()\r\nTypeError: 'tuple' object does not support item assignment\r\n```\r\n\nRandom sentence of length 1 fails\n* Faker version: 5.5.0\r\n* OS: Ubuntu 20.04.1\r\n\r\nCreating a random sentence of length 1 fails because it attempts to titleize the first word of the sentence, but `random_choices` is returning a tuple which is immutable. \r\n\r\nThis also occurs sometimes with 2 or 3 length sentences, although I presume that under the hood they are randomly selecting sentences of length 1.\r\n\r\nThis works as expected in Faker version 5.4.1.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\n>>> import faker\r\n>>> thing = faker.Faker()\r\n>>> print(thing.sentence(1))\r\n```\r\n\r\n### Expected behavior\r\n\r\nA random titleized word is printed with a fullstop\r\n\r\n### Actual behavior\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<input>\", line 1, in <module>\r\n File \"~/.../venv/lib/python3.6/site-packages/faker/providers/lorem/__init__.py\", line 86, in sentence\r\n words[0] = words[0].title()\r\nTypeError: 'tuple' object does not support item assignment\r\n```\r\n\n", "before_files": [{"content": "from .. import BaseProvider\n\nlocalized = True\n\n# 'Latin' is the default locale\ndefault_locale = 'la'\n\n\nclass Provider(BaseProvider):\n \"\"\"Implement default lorem provider for Faker.\n\n .. important::\n The default locale of the lorem provider is ``la``. When using a locale\n without a localized lorem provider, the ``la`` lorem provider will be\n used, so generated words will be in pseudo-Latin. 
The locale used for\n the standard provider docs was ``en_US``, and ``en_US`` has a localized\n lorem provider which is why the samples here show words in American\n English.\n \"\"\"\n\n word_connector = ' '\n sentence_punctuation = '.'\n\n def words(self, nb=3, ext_word_list=None, unique=False):\n \"\"\"Generate a list of words.\n\n The ``nb`` argument controls the number of words in the resulting list,\n and if ``ext_word_list`` is provided, words from that list will be used\n instead of those from the locale provider's built-in word list.\n\n If ``unique`` is ``True``, this method will return a list containing\n unique words. Under the hood, |random_sample| will be used for sampling\n without replacement. If ``unique`` is ``False``, |random_choices| is\n used instead, and the list returned may contain duplicates.\n\n .. warning::\n Depending on the length of a locale provider's built-in word list or\n on the length of ``ext_word_list`` if provided, a large ``nb`` can\n exhaust said lists if ``unique`` is ``True``, raising an exception.\n\n :sample:\n :sample: nb=5\n :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n :sample: nb=4, ext_word_list=['abc', 'def', 'ghi', 'jkl'], unique=True\n \"\"\"\n word_list = ext_word_list if ext_word_list else self.word_list\n if unique:\n return self.random_sample(word_list, length=nb)\n return self.random_choices(word_list, length=nb)\n\n def word(self, ext_word_list=None):\n \"\"\"Generate a word.\n\n This method uses |words| under the hood with the ``nb`` argument set to\n ``1`` to generate the result.\n\n :sample:\n :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return self.words(1, ext_word_list)[0]\n\n def sentence(self, nb_words=6, variable_nb_words=True, ext_word_list=None):\n \"\"\"Generate a sentence.\n\n The ``nb_words`` argument controls how many words the sentence will\n contain, and setting ``variable_nb_words`` to ``False`` will generate\n the exact amount, while setting it to ``True`` (default) will generate\n a random amount (+/-40%, minimum of 1) using |randomize_nb_elements|.\n\n Under the hood, |words| is used to generate the words, so the argument\n ``ext_word_list`` works in the same way here as it would in that method.\n\n :sample: nb_words=10\n :sample: nb_words=10, variable_nb_words=False\n :sample: nb_words=10, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n :sample: nb_words=10, variable_nb_words=True,\n ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n if nb_words <= 0:\n return ''\n\n if variable_nb_words:\n nb_words = self.randomize_nb_elements(nb_words, min=1)\n\n words = self.words(nb=nb_words, ext_word_list=ext_word_list)\n words[0] = words[0].title()\n\n return self.word_connector.join(words) + self.sentence_punctuation\n\n def sentences(self, nb=3, ext_word_list=None):\n \"\"\"Generate a list of sentences.\n\n This method uses |sentence| under the hood to generate sentences, and\n the ``nb`` argument controls exactly how many sentences the list will\n contain. 
The ``ext_word_list`` argument works in exactly the same way\n as well.\n\n :sample:\n :sample: nb=5\n :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return [self.sentence(ext_word_list=ext_word_list)\n for _ in range(0, nb)]\n\n def paragraph(\n self,\n nb_sentences=3,\n variable_nb_sentences=True,\n ext_word_list=None):\n \"\"\"Generate a paragraph.\n\n The ``nb_sentences`` argument controls how many sentences the paragraph\n will contain, and setting ``variable_nb_sentences`` to ``False`` will\n generate the exact amount, while setting it to ``True`` (default) will\n generate a random amount (+/-40%, minimum of 1) using\n |randomize_nb_elements|.\n\n Under the hood, |sentences| is used to generate the sentences, so the\n argument ``ext_word_list`` works in the same way here as it would in\n that method.\n\n :sample: nb_sentences=5\n :sample: nb_sentences=5, variable_nb_sentences=False\n :sample: nb_sentences=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n :sample: nb_sentences=5, variable_nb_sentences=False,\n ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n if nb_sentences <= 0:\n return ''\n\n if variable_nb_sentences:\n nb_sentences = self.randomize_nb_elements(nb_sentences, min=1)\n\n para = self.word_connector.join(self.sentences(\n nb_sentences, ext_word_list=ext_word_list,\n ))\n\n return para\n\n def paragraphs(self, nb=3, ext_word_list=None):\n \"\"\"Generate a list of paragraphs.\n\n This method uses |paragraph| under the hood to generate paragraphs, and\n the ``nb`` argument controls exactly how many sentences the list will\n contain. The ``ext_word_list`` argument works in exactly the same way\n as well.\n\n :sample: nb=5\n :sample: nb=5, ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return [self.paragraph(ext_word_list=ext_word_list)\n for _ in range(0, nb)]\n\n def text(self, max_nb_chars=200, ext_word_list=None):\n \"\"\"Generate a text string.\n\n The ``max_nb_chars`` argument controls the approximate number of\n characters the text string will have, and depending on its value, this\n method may use either |words|, |sentences|, or |paragraphs| for text\n generation. 
The ``ext_word_list`` argument works in exactly the same way\n it would in any of those methods.\n\n :sample: max_nb_chars=20\n :sample: max_nb_chars=80\n :sample: max_nb_chars=160\n :sample: ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n text = []\n if max_nb_chars < 5:\n raise ValueError(\n 'text() can only generate text of at least 5 characters')\n\n if max_nb_chars < 25:\n # join words\n while not text:\n size = 0\n # determine how many words are needed to reach the $max_nb_chars\n # once;\n while size < max_nb_chars:\n word = (self.word_connector if size else '') + \\\n self.word(ext_word_list=ext_word_list)\n text.append(word)\n size += len(word)\n text.pop()\n text[0] = text[0][0].upper() + text[0][1:]\n last_index = len(text) - 1\n text[last_index] += self.sentence_punctuation\n elif max_nb_chars < 100:\n # join sentences\n while not text:\n size = 0\n # determine how many sentences are needed to reach the\n # $max_nb_chars once\n while size < max_nb_chars:\n sentence = (self.word_connector if size else '') + \\\n self.sentence(ext_word_list=ext_word_list)\n text.append(sentence)\n size += len(sentence)\n text.pop()\n else:\n # join paragraphs\n while not text:\n size = 0\n # determine how many paragraphs are needed to reach the\n # $max_nb_chars once\n while size < max_nb_chars:\n paragraph = ('\\n' if size else '') + \\\n self.paragraph(ext_word_list=ext_word_list)\n text.append(paragraph)\n size += len(paragraph)\n text.pop()\n\n return \"\".join(text)\n\n def texts(self, nb_texts=3, max_nb_chars=200, ext_word_list=None):\n \"\"\"Generate a list of text strings.\n\n The ``nb_texts`` argument controls how many text strings the list will\n contain, and this method uses |text| under the hood for text generation,\n so the two remaining arguments, ``max_nb_chars`` and ``ext_word_list``\n will work in exactly the same way as well.\n\n :sample: nb_texts=5\n :sample: nb_texts=5, max_nb_chars=50\n :sample: nb_texts=5, max_nb_chars=50,\n ext_word_list=['abc', 'def', 'ghi', 'jkl']\n \"\"\"\n return [self.text(max_nb_chars, ext_word_list)\n for _ in range(0, nb_texts)]\n", "path": "faker/providers/lorem/__init__.py"}]} | 3,736 | 245 |
gh_patches_debug_6989 | rasdani/github-patches | git_diff | vyperlang__vyper-2533 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Cannot access `.balance` of address constants
### What's your issue about?
If I try to call the `balance` attribute of an address constant
```python
ADDR: constant(address) = 0x000000000000000000000000000000000000dEaD
@external
def foo() -> uint256:
return ADDR.balance
```
I get an unhandled typing exception
```python
...
~/.pyenv/versions/3.8.5/lib/python3.8/site-packages/vyper/parser/expr.py in __init__(self, node, context)
201 self.lll_node = fn()
202 if self.lll_node is None:
--> 203 raise TypeCheckFailure(f"{type(node).__name__} node did not produce LLL")
204
205 def parse_Int(self):
TypeCheckFailure: Name node did not produce LLL
This is an unhandled internal compiler error. Please create an issue on Github to notify the developers.
https://github.com/vyperlang/vyper/issues/new?template=bug.md
```
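The crash appears to originate in constant folding rather than in typing itself: `replace_constant` in `vyper/ast/folding.py` (included below) skips any `Name` node whose parent is an `Attribute`, so the `ADDR` in `ADDR.balance` is never replaced with its literal address. A small reproduction script, assuming the `vyper.compile_code` helper is available in the affected version:
```python
# Hypothetical reproduction; before a fix this raises TypeCheckFailure
# ("Name node did not produce LLL") while compiling.
from vyper import compile_code

SOURCE = """
ADDR: constant(address) = 0x000000000000000000000000000000000000dEaD

@external
def foo() -> uint256:
    return ADDR.balance
"""

print(compile_code(SOURCE))
```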
</issue>
<code>
[start of vyper/ast/folding.py]
1 from decimal import Decimal
2 from typing import Union
3
4 from vyper.ast import nodes as vy_ast
5 from vyper.builtin_functions import DISPATCH_TABLE
6 from vyper.exceptions import UnfoldableNode
7
8 BUILTIN_CONSTANTS = {
9 "EMPTY_BYTES32": (
10 vy_ast.Hex,
11 "0x0000000000000000000000000000000000000000000000000000000000000000",
12 ), # NOQA: E501
13 "ZERO_ADDRESS": (vy_ast.Hex, "0x0000000000000000000000000000000000000000"),
14 "MAX_INT128": (vy_ast.Int, 2 ** 127 - 1),
15 "MIN_INT128": (vy_ast.Int, -(2 ** 127)),
16 "MAX_DECIMAL": (vy_ast.Decimal, Decimal(2 ** 127 - 1)),
17 "MIN_DECIMAL": (vy_ast.Decimal, Decimal(-(2 ** 127))),
18 "MAX_UINT256": (vy_ast.Int, 2 ** 256 - 1),
19 }
20
21
22 def fold(vyper_module: vy_ast.Module) -> None:
23 """
24 Perform literal folding operations on a Vyper AST.
25
26 Arguments
27 ---------
28 vyper_module : Module
29 Top-level Vyper AST node.
30 """
31 replace_builtin_constants(vyper_module)
32
33 changed_nodes = 1
34 while changed_nodes:
35 changed_nodes = 0
36 changed_nodes += replace_user_defined_constants(vyper_module)
37 changed_nodes += replace_literal_ops(vyper_module)
38 changed_nodes += replace_subscripts(vyper_module)
39 changed_nodes += replace_builtin_functions(vyper_module)
40
41
42 def replace_literal_ops(vyper_module: vy_ast.Module) -> int:
43 """
44 Find and evaluate operation and comparison nodes within the Vyper AST,
45 replacing them with Constant nodes where possible.
46
47 Arguments
48 ---------
49 vyper_module : Module
50 Top-level Vyper AST node.
51
52 Returns
53 -------
54 int
55 Number of nodes that were replaced.
56 """
57 changed_nodes = 0
58
59 node_types = (vy_ast.BoolOp, vy_ast.BinOp, vy_ast.UnaryOp, vy_ast.Compare)
60 for node in vyper_module.get_descendants(node_types, reverse=True):
61 try:
62 new_node = node.evaluate()
63 except UnfoldableNode:
64 continue
65
66 changed_nodes += 1
67 vyper_module.replace_in_tree(node, new_node)
68
69 return changed_nodes
70
71
72 def replace_subscripts(vyper_module: vy_ast.Module) -> int:
73 """
74 Find and evaluate Subscript nodes within the Vyper AST, replacing them with
75 Constant nodes where possible.
76
77 Arguments
78 ---------
79 vyper_module : Module
80 Top-level Vyper AST node.
81
82 Returns
83 -------
84 int
85 Number of nodes that were replaced.
86 """
87 changed_nodes = 0
88
89 for node in vyper_module.get_descendants(vy_ast.Subscript, reverse=True):
90 try:
91 new_node = node.evaluate()
92 except UnfoldableNode:
93 continue
94
95 changed_nodes += 1
96 vyper_module.replace_in_tree(node, new_node)
97
98 return changed_nodes
99
100
101 def replace_builtin_functions(vyper_module: vy_ast.Module) -> int:
102 """
103 Find and evaluate builtin function calls within the Vyper AST, replacing
104 them with Constant nodes where possible.
105
106 Arguments
107 ---------
108 vyper_module : Module
109 Top-level Vyper AST node.
110
111 Returns
112 -------
113 int
114 Number of nodes that were replaced.
115 """
116 changed_nodes = 0
117
118 for node in vyper_module.get_descendants(vy_ast.Call, reverse=True):
119 if not isinstance(node.func, vy_ast.Name):
120 continue
121
122 name = node.func.id
123 func = DISPATCH_TABLE.get(name)
124 if func is None or not hasattr(func, "evaluate"):
125 continue
126 try:
127 new_node = func.evaluate(node) # type: ignore
128 except UnfoldableNode:
129 continue
130
131 changed_nodes += 1
132 vyper_module.replace_in_tree(node, new_node)
133
134 return changed_nodes
135
136
137 def replace_builtin_constants(vyper_module: vy_ast.Module) -> None:
138 """
139 Replace references to builtin constants with their literal values.
140
141 Arguments
142 ---------
143 vyper_module : Module
144 Top-level Vyper AST node.
145 """
146 for name, (node, value) in BUILTIN_CONSTANTS.items():
147 replace_constant(vyper_module, name, node(value=value), True) # type: ignore
148
149
150 def replace_user_defined_constants(vyper_module: vy_ast.Module) -> int:
151 """
152 Find user-defined constant assignments, and replace references
153 to the constants with their literal values.
154
155 Arguments
156 ---------
157 vyper_module : Module
158 Top-level Vyper AST node.
159
160 Returns
161 -------
162 int
163 Number of nodes that were replaced.
164 """
165 changed_nodes = 0
166
167 for node in vyper_module.get_children(vy_ast.AnnAssign):
168 if not isinstance(node.target, vy_ast.Name):
169 # left-hand-side of assignment is not a variable
170 continue
171 if node.get("annotation.func.id") != "constant":
172 # annotation is not wrapped in `constant(...)`
173 continue
174
175 changed_nodes += replace_constant(vyper_module, node.target.id, node.value, False)
176
177 return changed_nodes
178
179
180 # TODO constant folding on log events
181
182
183 def _replace(old_node, new_node):
184 if isinstance(new_node, vy_ast.Constant):
185 return new_node.from_node(old_node, value=new_node.value)
186 elif isinstance(new_node, vy_ast.List):
187 list_values = [_replace(old_node, i) for i in new_node.elements]
188 return new_node.from_node(old_node, elements=list_values)
189 else:
190 raise UnfoldableNode
191
192
193 def replace_constant(
194 vyper_module: vy_ast.Module,
195 id_: str,
196 replacement_node: Union[vy_ast.Constant, vy_ast.List],
197 raise_on_error: bool,
198 ) -> int:
199 """
200 Replace references to a variable name with a literal value.
201
202 Arguments
203 ---------
204 vyper_module : Module
205 Module-level ast node to perform replacement in.
206 id_ : str
207 String representing the `.id` attribute of the node(s) to be replaced.
208 replacement_node : Constant | List
209 Vyper ast node representing the literal value to be substituted in.
210 raise_on_error: bool
211 Boolean indicating if `UnfoldableNode` exception should be raised or ignored.
212
213 Returns
214 -------
215 int
216 Number of nodes that were replaced.
217 """
218 changed_nodes = 0
219
220 for node in vyper_module.get_descendants(vy_ast.Name, {"id": id_}, reverse=True):
221 parent = node.get_ancestor()
222
223 if isinstance(parent, vy_ast.Attribute):
224 # do not replace attributes
225 continue
226 if isinstance(parent, vy_ast.Call) and node == parent.func:
227 # do not replace calls
228 continue
229
230 # do not replace dictionary keys
231 if isinstance(parent, vy_ast.Dict) and node in parent.keys:
232 continue
233
234 if not node.get_ancestor(vy_ast.Index):
235 # do not replace left-hand side of assignments
236 assign = node.get_ancestor((vy_ast.Assign, vy_ast.AnnAssign, vy_ast.AugAssign))
237
238 if assign and node in assign.target.get_descendants(include_self=True):
239 continue
240
241 try:
242 new_node = _replace(node, replacement_node)
243 except UnfoldableNode:
244 if raise_on_error:
245 raise
246 continue
247
248 changed_nodes += 1
249 vyper_module.replace_in_tree(node, new_node)
250
251 return changed_nodes
252
[end of vyper/ast/folding.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/vyper/ast/folding.py b/vyper/ast/folding.py
--- a/vyper/ast/folding.py
+++ b/vyper/ast/folding.py
@@ -220,9 +220,6 @@
for node in vyper_module.get_descendants(vy_ast.Name, {"id": id_}, reverse=True):
parent = node.get_ancestor()
- if isinstance(parent, vy_ast.Attribute):
- # do not replace attributes
- continue
if isinstance(parent, vy_ast.Call) and node == parent.func:
# do not replace calls
continue
| {"golden_diff": "diff --git a/vyper/ast/folding.py b/vyper/ast/folding.py\n--- a/vyper/ast/folding.py\n+++ b/vyper/ast/folding.py\n@@ -220,9 +220,6 @@\n for node in vyper_module.get_descendants(vy_ast.Name, {\"id\": id_}, reverse=True):\n parent = node.get_ancestor()\n \n- if isinstance(parent, vy_ast.Attribute):\n- # do not replace attributes\n- continue\n if isinstance(parent, vy_ast.Call) and node == parent.func:\n # do not replace calls\n continue\n", "issue": "Cannot access `.balance` of address constants\n### What's your issue about?\r\nIf I try to cal the `balance` attribute of an address constant\r\n```python\r\nADDR: constant(address) = 0x000000000000000000000000000000000000dEaD\r\n\r\n@external\r\ndef foo() -> uint256:\r\n return ADDR.balance\r\n```\r\n\r\nI get an unhandled typing exception\r\n```python\r\n...\r\n~/.pyenv/versions/3.8.5/lib/python3.8/site-packages/vyper/parser/expr.py in __init__(self, node, context)\r\n 201 self.lll_node = fn()\r\n 202 if self.lll_node is None:\r\n--> 203 raise TypeCheckFailure(f\"{type(node).__name__} node did not produce LLL\")\r\n 204 \r\n 205 def parse_Int(self):\r\n\r\nTypeCheckFailure: Name node did not produce LLL\r\n\r\nThis is an unhandled internal compiler error. Please create an issue on Github to notify the developers.\r\nhttps://github.com/vyperlang/vyper/issues/new?template=bug.md\r\n```\n", "before_files": [{"content": "from decimal import Decimal\nfrom typing import Union\n\nfrom vyper.ast import nodes as vy_ast\nfrom vyper.builtin_functions import DISPATCH_TABLE\nfrom vyper.exceptions import UnfoldableNode\n\nBUILTIN_CONSTANTS = {\n \"EMPTY_BYTES32\": (\n vy_ast.Hex,\n \"0x0000000000000000000000000000000000000000000000000000000000000000\",\n ), # NOQA: E501\n \"ZERO_ADDRESS\": (vy_ast.Hex, \"0x0000000000000000000000000000000000000000\"),\n \"MAX_INT128\": (vy_ast.Int, 2 ** 127 - 1),\n \"MIN_INT128\": (vy_ast.Int, -(2 ** 127)),\n \"MAX_DECIMAL\": (vy_ast.Decimal, Decimal(2 ** 127 - 1)),\n \"MIN_DECIMAL\": (vy_ast.Decimal, Decimal(-(2 ** 127))),\n \"MAX_UINT256\": (vy_ast.Int, 2 ** 256 - 1),\n}\n\n\ndef fold(vyper_module: vy_ast.Module) -> None:\n \"\"\"\n Perform literal folding operations on a Vyper AST.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n \"\"\"\n replace_builtin_constants(vyper_module)\n\n changed_nodes = 1\n while changed_nodes:\n changed_nodes = 0\n changed_nodes += replace_user_defined_constants(vyper_module)\n changed_nodes += replace_literal_ops(vyper_module)\n changed_nodes += replace_subscripts(vyper_module)\n changed_nodes += replace_builtin_functions(vyper_module)\n\n\ndef replace_literal_ops(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find and evaluate operation and comparison nodes within the Vyper AST,\n replacing them with Constant nodes where possible.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n node_types = (vy_ast.BoolOp, vy_ast.BinOp, vy_ast.UnaryOp, vy_ast.Compare)\n for node in vyper_module.get_descendants(node_types, reverse=True):\n try:\n new_node = node.evaluate()\n except UnfoldableNode:\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n\n\ndef replace_subscripts(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find and evaluate Subscript nodes within the Vyper AST, replacing them with\n Constant nodes where possible.\n\n Arguments\n ---------\n vyper_module : 
Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_descendants(vy_ast.Subscript, reverse=True):\n try:\n new_node = node.evaluate()\n except UnfoldableNode:\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n\n\ndef replace_builtin_functions(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find and evaluate builtin function calls within the Vyper AST, replacing\n them with Constant nodes where possible.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_descendants(vy_ast.Call, reverse=True):\n if not isinstance(node.func, vy_ast.Name):\n continue\n\n name = node.func.id\n func = DISPATCH_TABLE.get(name)\n if func is None or not hasattr(func, \"evaluate\"):\n continue\n try:\n new_node = func.evaluate(node) # type: ignore\n except UnfoldableNode:\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n\n\ndef replace_builtin_constants(vyper_module: vy_ast.Module) -> None:\n \"\"\"\n Replace references to builtin constants with their literal values.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n \"\"\"\n for name, (node, value) in BUILTIN_CONSTANTS.items():\n replace_constant(vyper_module, name, node(value=value), True) # type: ignore\n\n\ndef replace_user_defined_constants(vyper_module: vy_ast.Module) -> int:\n \"\"\"\n Find user-defined constant assignments, and replace references\n to the constants with their literal values.\n\n Arguments\n ---------\n vyper_module : Module\n Top-level Vyper AST node.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_children(vy_ast.AnnAssign):\n if not isinstance(node.target, vy_ast.Name):\n # left-hand-side of assignment is not a variable\n continue\n if node.get(\"annotation.func.id\") != \"constant\":\n # annotation is not wrapped in `constant(...)`\n continue\n\n changed_nodes += replace_constant(vyper_module, node.target.id, node.value, False)\n\n return changed_nodes\n\n\n# TODO constant folding on log events\n\n\ndef _replace(old_node, new_node):\n if isinstance(new_node, vy_ast.Constant):\n return new_node.from_node(old_node, value=new_node.value)\n elif isinstance(new_node, vy_ast.List):\n list_values = [_replace(old_node, i) for i in new_node.elements]\n return new_node.from_node(old_node, elements=list_values)\n else:\n raise UnfoldableNode\n\n\ndef replace_constant(\n vyper_module: vy_ast.Module,\n id_: str,\n replacement_node: Union[vy_ast.Constant, vy_ast.List],\n raise_on_error: bool,\n) -> int:\n \"\"\"\n Replace references to a variable name with a literal value.\n\n Arguments\n ---------\n vyper_module : Module\n Module-level ast node to perform replacement in.\n id_ : str\n String representing the `.id` attribute of the node(s) to be replaced.\n replacement_node : Constant | List\n Vyper ast node representing the literal value to be substituted in.\n raise_on_error: bool\n Boolean indicating if `UnfoldableNode` exception should be raised or ignored.\n\n Returns\n -------\n int\n Number of nodes that were replaced.\n \"\"\"\n changed_nodes = 0\n\n for node in vyper_module.get_descendants(vy_ast.Name, {\"id\": id_}, reverse=True):\n parent = 
node.get_ancestor()\n\n if isinstance(parent, vy_ast.Attribute):\n # do not replace attributes\n continue\n if isinstance(parent, vy_ast.Call) and node == parent.func:\n # do not replace calls\n continue\n\n # do not replace dictionary keys\n if isinstance(parent, vy_ast.Dict) and node in parent.keys:\n continue\n\n if not node.get_ancestor(vy_ast.Index):\n # do not replace left-hand side of assignments\n assign = node.get_ancestor((vy_ast.Assign, vy_ast.AnnAssign, vy_ast.AugAssign))\n\n if assign and node in assign.target.get_descendants(include_self=True):\n continue\n\n try:\n new_node = _replace(node, replacement_node)\n except UnfoldableNode:\n if raise_on_error:\n raise\n continue\n\n changed_nodes += 1\n vyper_module.replace_in_tree(node, new_node)\n\n return changed_nodes\n", "path": "vyper/ast/folding.py"}]} | 3,215 | 134 |
gh_patches_debug_27102 | rasdani/github-patches | git_diff | fossasia__open-event-server-5627 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
copied events have no organizer
**Describe the bug**
<!-- A clear and concise description of what the bug is. -->
Currently, copied events have no associated event roles and are being shown with no organizers
**To Reproduce**
Steps to reproduce the behavior:
1. Go to any event
2. Click on copy
3. Go to Admin/Events
4. See error

**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
The copied event should also be organized by the previous organizer
**Additional details (please complete the following information):**
- OS: [e.g. MacOS, Ubuntu, CentOS] Ubuntu
- Python Version [e.g. `3.5`, `3.6`] 3.5
**Additional context**
<!-- Add any other context about the problem here. -->
Working on it
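A sketch of the step that appears to be missing from `create_event_copy()`, following the same expunge/make_transient pattern the endpoint already uses for tickets, sponsors and the rest (`old_event_id`/`new_event_id` are placeholders for the source and copied event ids; the model import matches the diff at the end of this record):
```python
from app.models.users_events_role import UsersEventsRoles

# Fragment meant to slot into create_event_copy() in app/api/event_copy.py.
user_event_roles = UsersEventsRoles.query.filter_by(event_id=old_event_id).all()
for user_role in user_event_roles:
    db.session.expunge(user_role)      # detach the existing role row
    make_transient(user_role)          # allow it to be re-inserted as a new row
    user_role.event_id = new_event_id  # attach it to the copied event
    delattr(user_role, 'id')           # let the database assign a new primary key
    db.session.add(user_role)
    db.session.commit()
```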
</issue>
<code>
[start of app/api/event_copy.py]
1 from flask import jsonify, Blueprint, abort, make_response
2 from sqlalchemy.orm import make_transient
3
4 from app.api.helpers.db import safe_query
5 from app.api.helpers.files import create_save_resized_image
6 from app.api.helpers.permission_manager import has_access
7 from app.models.custom_form import CustomForms
8 from app.models.discount_code import DiscountCode
9 from app.models.event import Event, get_new_event_identifier
10 from app.models import db
11 from app.models.microlocation import Microlocation
12 from app.models.social_link import SocialLink
13 from app.models.speakers_call import SpeakersCall
14 from app.models.sponsor import Sponsor
15 from app.models.ticket import Ticket
16 from app.models.track import Track
17
18 event_copy = Blueprint('event_copy', __name__, url_prefix='/v1/events')
19
20
21 @event_copy.route('/<identifier>/copy', methods=['POST'])
22 def create_event_copy(identifier):
23 id = 'identifier'
24
25 if identifier.isdigit():
26 id = 'id'
27
28 event = safe_query(db, Event, id, identifier, 'event_' + id)
29
30 if not has_access('is_coorganizer', event_id=event.id):
31 return abort(
32 make_response(jsonify(error="Access Forbidden"), 403)
33 )
34 tickets = Ticket.query.filter_by(event_id=event.id).all()
35 social_links = SocialLink.query.filter_by(event_id=event.id).all()
36 sponsors = Sponsor.query.filter_by(event_id=event.id).all()
37 microlocations = Microlocation.query.filter_by(event_id=event.id).all()
38 tracks = Track.query.filter_by(event_id=event.id).all()
39 custom_forms = CustomForms.query.filter_by(event_id=event.id).all()
40 discount_codes = DiscountCode.query.filter_by(event_id=event.id).all()
41 speaker_calls = SpeakersCall.query.filter_by(event_id=event.id).all()
42
43 db.session.expunge(event) # expunge the object from session
44 make_transient(event)
45 delattr(event, 'id')
46 event.identifier = get_new_event_identifier()
47 db.session.add(event)
48 db.session.commit()
49
50 # Removes access_codes, order_tickets, ticket_tags for the new tickets created.
51 for ticket in tickets:
52 ticket_id = ticket.id
53 db.session.expunge(ticket) # expunge the object from session
54 make_transient(ticket)
55 ticket.event_id = event.id
56 delattr(ticket, 'id')
57 db.session.add(ticket)
58 db.session.commit()
59
60 for link in social_links:
61 link_id = link.id
62 db.session.expunge(link) # expunge the object from session
63 make_transient(link)
64 link.event_id = event.id
65 delattr(link, 'id')
66 db.session.add(link)
67 db.session.commit()
68
69 for sponsor in sponsors:
70 sponsor_id = sponsor.id
71 db.session.expunge(sponsor) # expunge the object from session
72 make_transient(sponsor)
73 sponsor.event_id = event.id
74 logo_url = create_save_resized_image(image_file=sponsor.logo_url, resize=False)
75 delattr(sponsor, 'id')
76 sponsor.logo_url = logo_url
77 db.session.add(sponsor)
78 db.session.commit()
79
80 for location in microlocations:
81 location_id = location.id
82 db.session.expunge(location) # expunge the object from session
83 make_transient(location)
84 location.event_id = event.id
85 delattr(location, 'id')
86 db.session.add(location)
87 db.session.commit()
88
89 # No sessions are copied for new tracks
90 for track in tracks:
91 track_id = track.id
92 db.session.expunge(track) # expunge the object from session
93 make_transient(track)
94 track.event_id = event.id
95 delattr(track, 'id')
96 db.session.add(track)
97 db.session.commit()
98
99 for call in speaker_calls:
100 call_id = call.id
101 db.session.expunge(call) # expunge the object from session
102 make_transient(call)
103 call.event_id = event.id
104 delattr(call, 'id')
105 db.session.add(call)
106 db.session.commit()
107
108 for code in discount_codes:
109 code_id = code.id
110 db.session.expunge(code) # expunge the object from session
111 make_transient(code)
112 code.event_id = event.id
113 delattr(code, 'id')
114 db.session.add(code)
115 db.session.commit()
116
117 for form in custom_forms:
118 form_id = form.id
119 db.session.expunge(form) # expunge the object from session
120 make_transient(form)
121 form.event_id = event.id
122 delattr(form, 'id')
123 db.session.add(form)
124 db.session.commit()
125
126 return jsonify({
127 'id': event.id,
128 'identifier': event.identifier,
129 "copied": True
130 })
131
[end of app/api/event_copy.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/app/api/event_copy.py b/app/api/event_copy.py
--- a/app/api/event_copy.py
+++ b/app/api/event_copy.py
@@ -14,6 +14,7 @@
from app.models.sponsor import Sponsor
from app.models.ticket import Ticket
from app.models.track import Track
+from app.models.users_events_role import UsersEventsRoles
event_copy = Blueprint('event_copy', __name__, url_prefix='/v1/events')
@@ -39,6 +40,7 @@
custom_forms = CustomForms.query.filter_by(event_id=event.id).all()
discount_codes = DiscountCode.query.filter_by(event_id=event.id).all()
speaker_calls = SpeakersCall.query.filter_by(event_id=event.id).all()
+ user_event_roles = UsersEventsRoles.query.filter_by(event_id=event.id).all()
db.session.expunge(event) # expunge the object from session
make_transient(event)
@@ -123,6 +125,14 @@
db.session.add(form)
db.session.commit()
+ for user_role in user_event_roles:
+ db.session.expunge(user_role)
+ make_transient(user_role)
+ user_role.event_id = event.id
+ delattr(user_role, 'id')
+ db.session.add(user_role)
+ db.session.commit()
+
return jsonify({
'id': event.id,
'identifier': event.identifier,
| {"golden_diff": "diff --git a/app/api/event_copy.py b/app/api/event_copy.py\n--- a/app/api/event_copy.py\n+++ b/app/api/event_copy.py\n@@ -14,6 +14,7 @@\n from app.models.sponsor import Sponsor\n from app.models.ticket import Ticket\n from app.models.track import Track\n+from app.models.users_events_role import UsersEventsRoles\n \n event_copy = Blueprint('event_copy', __name__, url_prefix='/v1/events')\n \n@@ -39,6 +40,7 @@\n custom_forms = CustomForms.query.filter_by(event_id=event.id).all()\n discount_codes = DiscountCode.query.filter_by(event_id=event.id).all()\n speaker_calls = SpeakersCall.query.filter_by(event_id=event.id).all()\n+ user_event_roles = UsersEventsRoles.query.filter_by(event_id=event.id).all()\n \n db.session.expunge(event) # expunge the object from session\n make_transient(event)\n@@ -123,6 +125,14 @@\n db.session.add(form)\n db.session.commit()\n \n+ for user_role in user_event_roles:\n+ db.session.expunge(user_role)\n+ make_transient(user_role)\n+ user_role.event_id = event.id\n+ delattr(user_role, 'id')\n+ db.session.add(user_role)\n+ db.session.commit()\n+\n return jsonify({\n 'id': event.id,\n 'identifier': event.identifier,\n", "issue": "copied events have no organizer\n**Describe the bug**\r\n<!-- A clear and concise description of what the bug is. -->\r\nCurrently, copied events have no associated event roles and are being showed with no organizers\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Go to any event\r\n2. Click on copy\r\n3. Go to Admin/Events \r\n4. See error\r\n\r\n\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe copied event should also be organized by previous organiser himself\r\n\r\n**Additional details (please complete the following information):**\r\n - OS: [e.g. MacOS, Ubuntu, CentOS] Ubuntu\r\n - Python Version [e.g. `3.5`, `3.6`] 3.5\r\n\r\n**Additional context**\r\n<!-- Add any other context about the problem here. 
-->\r\nWorking on it\n", "before_files": [{"content": "from flask import jsonify, Blueprint, abort, make_response\nfrom sqlalchemy.orm import make_transient\n\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.files import create_save_resized_image\nfrom app.api.helpers.permission_manager import has_access\nfrom app.models.custom_form import CustomForms\nfrom app.models.discount_code import DiscountCode\nfrom app.models.event import Event, get_new_event_identifier\nfrom app.models import db\nfrom app.models.microlocation import Microlocation\nfrom app.models.social_link import SocialLink\nfrom app.models.speakers_call import SpeakersCall\nfrom app.models.sponsor import Sponsor\nfrom app.models.ticket import Ticket\nfrom app.models.track import Track\n\nevent_copy = Blueprint('event_copy', __name__, url_prefix='/v1/events')\n\n\n@event_copy.route('/<identifier>/copy', methods=['POST'])\ndef create_event_copy(identifier):\n id = 'identifier'\n\n if identifier.isdigit():\n id = 'id'\n\n event = safe_query(db, Event, id, identifier, 'event_' + id)\n\n if not has_access('is_coorganizer', event_id=event.id):\n return abort(\n make_response(jsonify(error=\"Access Forbidden\"), 403)\n )\n tickets = Ticket.query.filter_by(event_id=event.id).all()\n social_links = SocialLink.query.filter_by(event_id=event.id).all()\n sponsors = Sponsor.query.filter_by(event_id=event.id).all()\n microlocations = Microlocation.query.filter_by(event_id=event.id).all()\n tracks = Track.query.filter_by(event_id=event.id).all()\n custom_forms = CustomForms.query.filter_by(event_id=event.id).all()\n discount_codes = DiscountCode.query.filter_by(event_id=event.id).all()\n speaker_calls = SpeakersCall.query.filter_by(event_id=event.id).all()\n\n db.session.expunge(event) # expunge the object from session\n make_transient(event)\n delattr(event, 'id')\n event.identifier = get_new_event_identifier()\n db.session.add(event)\n db.session.commit()\n\n # Removes access_codes, order_tickets, ticket_tags for the new tickets created.\n for ticket in tickets:\n ticket_id = ticket.id\n db.session.expunge(ticket) # expunge the object from session\n make_transient(ticket)\n ticket.event_id = event.id\n delattr(ticket, 'id')\n db.session.add(ticket)\n db.session.commit()\n\n for link in social_links:\n link_id = link.id\n db.session.expunge(link) # expunge the object from session\n make_transient(link)\n link.event_id = event.id\n delattr(link, 'id')\n db.session.add(link)\n db.session.commit()\n\n for sponsor in sponsors:\n sponsor_id = sponsor.id\n db.session.expunge(sponsor) # expunge the object from session\n make_transient(sponsor)\n sponsor.event_id = event.id\n logo_url = create_save_resized_image(image_file=sponsor.logo_url, resize=False)\n delattr(sponsor, 'id')\n sponsor.logo_url = logo_url\n db.session.add(sponsor)\n db.session.commit()\n\n for location in microlocations:\n location_id = location.id\n db.session.expunge(location) # expunge the object from session\n make_transient(location)\n location.event_id = event.id\n delattr(location, 'id')\n db.session.add(location)\n db.session.commit()\n\n # No sessions are copied for new tracks\n for track in tracks:\n track_id = track.id\n db.session.expunge(track) # expunge the object from session\n make_transient(track)\n track.event_id = event.id\n delattr(track, 'id')\n db.session.add(track)\n db.session.commit()\n\n for call in speaker_calls:\n call_id = call.id\n db.session.expunge(call) # expunge the object from session\n make_transient(call)\n call.event_id = 
event.id\n delattr(call, 'id')\n db.session.add(call)\n db.session.commit()\n\n for code in discount_codes:\n code_id = code.id\n db.session.expunge(code) # expunge the object from session\n make_transient(code)\n code.event_id = event.id\n delattr(code, 'id')\n db.session.add(code)\n db.session.commit()\n\n for form in custom_forms:\n form_id = form.id\n db.session.expunge(form) # expunge the object from session\n make_transient(form)\n form.event_id = event.id\n delattr(form, 'id')\n db.session.add(form)\n db.session.commit()\n\n return jsonify({\n 'id': event.id,\n 'identifier': event.identifier,\n \"copied\": True\n })\n", "path": "app/api/event_copy.py"}]} | 2,097 | 304 |
gh_patches_debug_40419 | rasdani/github-patches | git_diff | cowrie__cowrie-1564 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
/etc/shadow file contents are incorrect
**Describe the bug**
In the latest honeypot from the master branch, `cat /etc/shadow` outputs a readme file instead of password contents. It looks like commit 937402ece56a4d272713ea38be32c6dc4191390a replaced the file contents.
**To Reproduce**
- enter honeypot as root
- Run: cat /etc/shadow
**Expected behavior**
Expecting output in shadow file format, not a readme
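For reference, a sketch of the kind of output expected here: the usual nine colon-separated `shadow(5)` fields per account. The account names, hashes and day counts below are invented for illustration.

```
root:$6$saltsalt$InventedHashValueForIllustrationOnly:18831:0:99999:7:::
phil:$6$saltsalt$AnotherInventedHashValue:18831:0:99999:7:::
sshd:*:18831:0:99999:7:::
```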
**Server (please complete the following information):**
 - Using the Docker image built on 14 May 2021
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 from setuptools import setup
4
5 setup(
6 name="Cowrie",
7 description="Cowrie SSH/Telnet Honeypot.",
8 long_description="Cowrie SSH/Telnet Honeypot.",
9 author="Michel Oosterhof",
10 author_email="[email protected]",
11 maintainer="Michel Oosterhof",
12 maintainer_email="[email protected]",
13 keywords="ssh telnet honeypot",
14 platforms="Unix, Mac OSX",
15 license="BSD",
16 url="https://www.cowrie.org/",
17 packages=["cowrie", "twisted"],
18 include_package_data=True,
19 package_dir={"": "src"},
20 package_data={"": ["*.md"]},
21 use_incremental=True,
22 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4",
23 scripts=["bin/fsctl", "bin/asciinema", "bin/cowrie", "bin/createfs", "bin/playlog"],
24 classifiers=[
25 "Development Status :: 5 - Production/Stable",
26 "Environment :: No Input/Output (Daemon)",
27 "Framework :: Twisted",
28 "Intended Audience :: Developers",
29 "Intended Audience :: System Administrators",
30 "License :: OSI Approved :: BSD License",
31 "Operating System :: MacOS :: MacOS X",
32 "Operating System :: POSIX :: Linux",
33 "Operating System :: POSIX",
34 "Programming Language :: Python",
35 "Topic :: Security",
36 ],
37 setup_requires=["incremental", "click"],
38 install_requires=[
39 "twisted>=17.1.0",
40 "cryptography>=0.9.1",
41 "configparser",
42 "pyopenssl",
43 "pyparsing",
44 "incremental",
45 "packaging",
46 "appdirs>=1.4.0",
47 "python-dateutil",
48 "service_identity>=14.0.0",
49 ],
50 extras_require={
51 "csirtg": ["csirtgsdk>=0.0.0a17"],
52 "dshield": ["requests"],
53 "elasticsearch": ["pyes"],
54 "mysql": ["mysqlclient"],
55 "mongodb": ["pymongo"],
56 "rethinkdblog": ["rethinkdb"],
57 "s3": ["botocore"],
58 "slack": ["slackclient"],
59 "influxdb": ["influxdb"],
60 },
61 )
62
[end of setup.py]
[start of src/cowrie/output/csirtg.py]
1 import os
2 from datetime import datetime
3
4 from csirtgsdk.client import Client
5 from csirtgsdk.indicator import Indicator
6
7 from twisted.python import log
8
9 import cowrie.core.output
10 from cowrie.core.config import CowrieConfig
11
12 USERNAME = os.environ.get("CSIRTG_USER")
13 FEED = os.environ.get("CSIRTG_FEED")
14 TOKEN = os.environ.get("CSIRG_TOKEN")
15 DESCRIPTION = os.environ.get("CSIRTG_DESCRIPTION", "random scanning activity")
16
17
18 class Output(cowrie.core.output.Output):
19 """
20 csirtg output
21 """
22
23 def start(
24 self,
25 ):
26 self.user = CowrieConfig.get("output_csirtg", "username") or USERNAME
27 self.feed = CowrieConfig.get("output_csirtg", "feed") or FEED
28 self.token = CowrieConfig.get("output_csirtg", "token") or TOKEN
29 self.description = CowrieConfig.get(
30 "output_csirtg", "description", fallback=DESCRIPTION
31 )
32 self.context = {}
33 self.client = Client(token=self.token)
34
35 def stop(self):
36 pass
37
38 def write(self, e):
39 peerIP = e["src_ip"]
40 ts = e["timestamp"]
41 system = e.get("system", None)
42
43 if system not in [
44 "cowrie.ssh.factory.CowrieSSHFactory",
45 "cowrie.telnet.transport.HoneyPotTelnetFactory",
46 ]:
47 return
48
49 today = str(datetime.now().date())
50
51 if not self.context.get(today):
52 self.context = {}
53 self.context[today] = set()
54
55 key = ",".join([peerIP, system])
56
57 if key in self.context[today]:
58 return
59
60 self.context[today].add(key)
61
62 tags = "scanner,ssh"
63 port = 22
64 if e["system"] == "cowrie.telnet.transport.HoneyPotTelnetFactory":
65 tags = "scanner,telnet"
66 port = 23
67
68 i = {
69 "user": self.user,
70 "feed": self.feed,
71 "indicator": peerIP,
72 "portlist": port,
73 "protocol": "tcp",
74 "tags": tags,
75 "firsttime": ts,
76 "lasttime": ts,
77 "description": self.description,
78 }
79
80 ret = Indicator(self.client, i).submit()
81 log.msg("logged to csirtg {} ".format(ret["location"]))
82
[end of src/cowrie/output/csirtg.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -36,7 +36,7 @@
],
setup_requires=["incremental", "click"],
install_requires=[
- "twisted>=17.1.0",
+ "twisted==21.1.0",
"cryptography>=0.9.1",
"configparser",
"pyopenssl",
@@ -48,7 +48,7 @@
"service_identity>=14.0.0",
],
extras_require={
- "csirtg": ["csirtgsdk>=0.0.0a17"],
+ "csirtg": ["csirtgsdk==1.1.5"],
"dshield": ["requests"],
"elasticsearch": ["pyes"],
"mysql": ["mysqlclient"],
diff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py
--- a/src/cowrie/output/csirtg.py
+++ b/src/cowrie/output/csirtg.py
@@ -1,41 +1,50 @@
import os
from datetime import datetime
-from csirtgsdk.client import Client
-from csirtgsdk.indicator import Indicator
-
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
-USERNAME = os.environ.get("CSIRTG_USER")
-FEED = os.environ.get("CSIRTG_FEED")
-TOKEN = os.environ.get("CSIRG_TOKEN")
-DESCRIPTION = os.environ.get("CSIRTG_DESCRIPTION", "random scanning activity")
+token = CowrieConfig.get("output_csirtg", "token", fallback="a1b2c3d4")
+if token == "a1b2c3d4":
+ log.msg("output_csirtg: token not found in configuration file")
+ exit(1)
+
+os.environ["CSIRTG_TOKEN"] = token
+import csirtgsdk # noqa: E402
class Output(cowrie.core.output.Output):
"""
- csirtg output
+ CSIRTG output
"""
- def start(
- self,
- ):
- self.user = CowrieConfig.get("output_csirtg", "username") or USERNAME
- self.feed = CowrieConfig.get("output_csirtg", "feed") or FEED
- self.token = CowrieConfig.get("output_csirtg", "token") or TOKEN
- self.description = CowrieConfig.get(
- "output_csirtg", "description", fallback=DESCRIPTION
- )
+ def start(self):
+ """
+ Start the output module.
+ Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import
+ Cowrie sets this environment variable.
+ """
+ self.user = CowrieConfig.get("output_csirtg", "username")
+ self.feed = CowrieConfig.get("output_csirtg", "feed")
+ self.debug = CowrieConfig.getboolean("output_csirtg", "debug", fallback=False)
+ self.description = CowrieConfig.get("output_csirtg", "description")
+
self.context = {}
- self.client = Client(token=self.token)
+ # self.client = csirtgsdk.client.Client()
def stop(self):
pass
def write(self, e):
+ """
+ Only pass on connection events
+ """
+ if e["eventid"] == "cowrie.session.connect":
+ self.submitIp(e)
+
+ def submitIp(self, e):
peerIP = e["src_ip"]
ts = e["timestamp"]
system = e.get("system", None)
@@ -77,5 +86,12 @@
"description": self.description,
}
- ret = Indicator(self.client, i).submit()
- log.msg("logged to csirtg {} ".format(ret["location"]))
+ if self.debug is True:
+ log.msg(f"output_csirtg: Submitting {i!r} to CSIRTG")
+
+ ind = csirtgsdk.indicator.Indicator(i).submit()
+
+ if self.debug is True:
+ log.msg(f"output_csirtg: Submitted {ind!r} to CSIRTG")
+
+ log.msg("output_csirtg: submitted to csirtg at {} ".format(ind["location"]))
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -36,7 +36,7 @@\n ],\n setup_requires=[\"incremental\", \"click\"],\n install_requires=[\n- \"twisted>=17.1.0\",\n+ \"twisted==21.1.0\",\n \"cryptography>=0.9.1\",\n \"configparser\",\n \"pyopenssl\",\n@@ -48,7 +48,7 @@\n \"service_identity>=14.0.0\",\n ],\n extras_require={\n- \"csirtg\": [\"csirtgsdk>=0.0.0a17\"],\n+ \"csirtg\": [\"csirtgsdk==1.1.5\"],\n \"dshield\": [\"requests\"],\n \"elasticsearch\": [\"pyes\"],\n \"mysql\": [\"mysqlclient\"],\ndiff --git a/src/cowrie/output/csirtg.py b/src/cowrie/output/csirtg.py\n--- a/src/cowrie/output/csirtg.py\n+++ b/src/cowrie/output/csirtg.py\n@@ -1,41 +1,50 @@\n import os\n from datetime import datetime\n \n-from csirtgsdk.client import Client\n-from csirtgsdk.indicator import Indicator\n-\n from twisted.python import log\n \n import cowrie.core.output\n from cowrie.core.config import CowrieConfig\n \n-USERNAME = os.environ.get(\"CSIRTG_USER\")\n-FEED = os.environ.get(\"CSIRTG_FEED\")\n-TOKEN = os.environ.get(\"CSIRG_TOKEN\")\n-DESCRIPTION = os.environ.get(\"CSIRTG_DESCRIPTION\", \"random scanning activity\")\n+token = CowrieConfig.get(\"output_csirtg\", \"token\", fallback=\"a1b2c3d4\")\n+if token == \"a1b2c3d4\":\n+ log.msg(\"output_csirtg: token not found in configuration file\")\n+ exit(1)\n+\n+os.environ[\"CSIRTG_TOKEN\"] = token\n+import csirtgsdk # noqa: E402\n \n \n class Output(cowrie.core.output.Output):\n \"\"\"\n- csirtg output\n+ CSIRTG output\n \"\"\"\n \n- def start(\n- self,\n- ):\n- self.user = CowrieConfig.get(\"output_csirtg\", \"username\") or USERNAME\n- self.feed = CowrieConfig.get(\"output_csirtg\", \"feed\") or FEED\n- self.token = CowrieConfig.get(\"output_csirtg\", \"token\") or TOKEN\n- self.description = CowrieConfig.get(\n- \"output_csirtg\", \"description\", fallback=DESCRIPTION\n- )\n+ def start(self):\n+ \"\"\"\n+ Start the output module.\n+ Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import\n+ Cowrie sets this environment variable.\n+ \"\"\"\n+ self.user = CowrieConfig.get(\"output_csirtg\", \"username\")\n+ self.feed = CowrieConfig.get(\"output_csirtg\", \"feed\")\n+ self.debug = CowrieConfig.getboolean(\"output_csirtg\", \"debug\", fallback=False)\n+ self.description = CowrieConfig.get(\"output_csirtg\", \"description\")\n+\n self.context = {}\n- self.client = Client(token=self.token)\n+ # self.client = csirtgsdk.client.Client()\n \n def stop(self):\n pass\n \n def write(self, e):\n+ \"\"\"\n+ Only pass on connection events\n+ \"\"\"\n+ if e[\"eventid\"] == \"cowrie.session.connect\":\n+ self.submitIp(e)\n+\n+ def submitIp(self, e):\n peerIP = e[\"src_ip\"]\n ts = e[\"timestamp\"]\n system = e.get(\"system\", None)\n@@ -77,5 +86,12 @@\n \"description\": self.description,\n }\n \n- ret = Indicator(self.client, i).submit()\n- log.msg(\"logged to csirtg {} \".format(ret[\"location\"]))\n+ if self.debug is True:\n+ log.msg(f\"output_csirtg: Submitting {i!r} to CSIRTG\")\n+\n+ ind = csirtgsdk.indicator.Indicator(i).submit()\n+\n+ if self.debug is True:\n+ log.msg(f\"output_csirtg: Submitted {ind!r} to CSIRTG\")\n+\n+ log.msg(\"output_csirtg: submitted to csirtg at {} \".format(ind[\"location\"]))\n", "issue": "/etc/shadow file contents are incorrect\n**Describe the bug**\r\nIn the latest honeypot from master branch, a \u201ccat /etc/shadow\u201c outputs a readme file instead of password contents. 
It looks the commit 937402ece56a4d272713ea38be32c6dc4191390a replaced the file contents.\r\n\r\n**To Reproduce**\r\n- enter honeypot as root\r\n- Run: cat /etc/shadow\r\n\r\n**Expected behavior**\r\nExoecting a shadow file format output, not readme\r\n\r\n**Server (please complete the following information):**\r\n - Using the docker image built on 14 may 2021\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nsetup(\n name=\"Cowrie\",\n description=\"Cowrie SSH/Telnet Honeypot.\",\n long_description=\"Cowrie SSH/Telnet Honeypot.\",\n author=\"Michel Oosterhof\",\n author_email=\"[email protected]\",\n maintainer=\"Michel Oosterhof\",\n maintainer_email=\"[email protected]\",\n keywords=\"ssh telnet honeypot\",\n platforms=\"Unix, Mac OSX\",\n license=\"BSD\",\n url=\"https://www.cowrie.org/\",\n packages=[\"cowrie\", \"twisted\"],\n include_package_data=True,\n package_dir={\"\": \"src\"},\n package_data={\"\": [\"*.md\"]},\n use_incremental=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4\",\n scripts=[\"bin/fsctl\", \"bin/asciinema\", \"bin/cowrie\", \"bin/createfs\", \"bin/playlog\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: No Input/Output (Daemon)\",\n \"Framework :: Twisted\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: POSIX\",\n \"Programming Language :: Python\",\n \"Topic :: Security\",\n ],\n setup_requires=[\"incremental\", \"click\"],\n install_requires=[\n \"twisted>=17.1.0\",\n \"cryptography>=0.9.1\",\n \"configparser\",\n \"pyopenssl\",\n \"pyparsing\",\n \"incremental\",\n \"packaging\",\n \"appdirs>=1.4.0\",\n \"python-dateutil\",\n \"service_identity>=14.0.0\",\n ],\n extras_require={\n \"csirtg\": [\"csirtgsdk>=0.0.0a17\"],\n \"dshield\": [\"requests\"],\n \"elasticsearch\": [\"pyes\"],\n \"mysql\": [\"mysqlclient\"],\n \"mongodb\": [\"pymongo\"],\n \"rethinkdblog\": [\"rethinkdb\"],\n \"s3\": [\"botocore\"],\n \"slack\": [\"slackclient\"],\n \"influxdb\": [\"influxdb\"],\n },\n)\n", "path": "setup.py"}, {"content": "import os\nfrom datetime import datetime\n\nfrom csirtgsdk.client import Client\nfrom csirtgsdk.indicator import Indicator\n\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CowrieConfig\n\nUSERNAME = os.environ.get(\"CSIRTG_USER\")\nFEED = os.environ.get(\"CSIRTG_FEED\")\nTOKEN = os.environ.get(\"CSIRG_TOKEN\")\nDESCRIPTION = os.environ.get(\"CSIRTG_DESCRIPTION\", \"random scanning activity\")\n\n\nclass Output(cowrie.core.output.Output):\n \"\"\"\n csirtg output\n \"\"\"\n\n def start(\n self,\n ):\n self.user = CowrieConfig.get(\"output_csirtg\", \"username\") or USERNAME\n self.feed = CowrieConfig.get(\"output_csirtg\", \"feed\") or FEED\n self.token = CowrieConfig.get(\"output_csirtg\", \"token\") or TOKEN\n self.description = CowrieConfig.get(\n \"output_csirtg\", \"description\", fallback=DESCRIPTION\n )\n self.context = {}\n self.client = Client(token=self.token)\n\n def stop(self):\n pass\n\n def write(self, e):\n peerIP = e[\"src_ip\"]\n ts = e[\"timestamp\"]\n system = e.get(\"system\", None)\n\n if system not in [\n \"cowrie.ssh.factory.CowrieSSHFactory\",\n \"cowrie.telnet.transport.HoneyPotTelnetFactory\",\n ]:\n return\n\n today = str(datetime.now().date())\n\n if not 
self.context.get(today):\n self.context = {}\n self.context[today] = set()\n\n key = \",\".join([peerIP, system])\n\n if key in self.context[today]:\n return\n\n self.context[today].add(key)\n\n tags = \"scanner,ssh\"\n port = 22\n if e[\"system\"] == \"cowrie.telnet.transport.HoneyPotTelnetFactory\":\n tags = \"scanner,telnet\"\n port = 23\n\n i = {\n \"user\": self.user,\n \"feed\": self.feed,\n \"indicator\": peerIP,\n \"portlist\": port,\n \"protocol\": \"tcp\",\n \"tags\": tags,\n \"firsttime\": ts,\n \"lasttime\": ts,\n \"description\": self.description,\n }\n\n ret = Indicator(self.client, i).submit()\n log.msg(\"logged to csirtg {} \".format(ret[\"location\"]))\n", "path": "src/cowrie/output/csirtg.py"}]} | 2,051 | 976 |
gh_patches_debug_50124 | rasdani/github-patches | git_diff | scrapy__scrapy-2649 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
After adding request flags subclasses of logformatter that rely on 'flags' format string are broken
#2082 added flags to `Request`, but it also renamed the formatting string key from `flags` to `response_flags`/`request_flags`:
```diff
-CRAWLEDMSG = u"Crawled (%(status)s) %(request)s (referer: %(referer)s)%(flags)s"
+CRAWLEDMSG = u"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s"
```
Scrapy allows you to override the logformatter, and this is what I have in my project. I have a logformatter looking roughly like this:
```python
# dirbot/logf.py
from scrapy.logformatter import LogFormatter
class CustomLogFormatter(LogFormatter):
def crawled(self, request, response, spider):
kwargs = super(CustomLogFormatter, self).crawled(
request, response, spider)
kwargs['msg'] = (
u"Crawled (%(status)s) %(request)s "
u"(referer: %(referer)s, latency: %(latency).2f s)%(flags)s"
)
kwargs['args']['latency'] = response.meta.get('download_latency', 0)
return kwargs
```
Now, if you enable it in settings with `LOG_FORMATTER = 'dirbot.logf.CustomLogFormatter'` and try to run it with recent master, you'll get a KeyError:
```
2017-03-13 14:15:26 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 464, in format
record.message = record.getMessage()
File "/usr/lib/python2.7/logging/__init__.py", line 328, in getMessage
msg = msg % self.args
KeyError: u'flags'
Logged from file engine.py, line 238
Traceback (most recent call last):
File "/usr/lib/python2.7/logging/__init__.py", line 851, in emit
msg = self.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 724, in format
return fmt.format(record)
File "/usr/lib/python2.7/logging/__init__.py", line 464, in format
record.message = record.getMessage()
File "/usr/lib/python2.7/logging/__init__.py", line 328, in getMessage
msg = msg % self.args
KeyError: u'flags'
Logged from file engine.py, line 238
2017-03-13 14:15:27 [scrapy.core.scraper] DEBUG: Scraped from <200 http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/>
```
So this change that renamed `flags` to `response_flags/request_flags` seems backward incompatible.
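One way to keep such subclasses working is for the stock `crawled()` to keep populating the legacy `flags` key next to the new ones. A sketch of that approach (a drop-in body for `LogFormatter.crawled` from the file below; it mirrors the backward-compatibility line in the diff further down):

```python
    def crawled(self, request, response, spider):
        request_flags = ' %s' % str(request.flags) if request.flags else ''
        response_flags = ' %s' % str(response.flags) if response.flags else ''
        return {
            'level': logging.DEBUG,
            'msg': CRAWLEDMSG,
            'args': {
                'status': response.status,
                'request': request,
                'request_flags': request_flags,
                'referer': referer_str(request),
                'response_flags': response_flags,
                # keep the pre-1.4 placeholder so %(flags)s still resolves
                'flags': response_flags,
            }
        }
```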
</issue>
<code>
[start of scrapy/logformatter.py]
1 import os
2 import logging
3
4 from twisted.python.failure import Failure
5
6 from scrapy.utils.request import referer_str
7
8 SCRAPEDMSG = u"Scraped from %(src)s" + os.linesep + "%(item)s"
9 DROPPEDMSG = u"Dropped: %(exception)s" + os.linesep + "%(item)s"
10 CRAWLEDMSG = u"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s"
11
12
13 class LogFormatter(object):
14 """Class for generating log messages for different actions.
15
16 All methods must return a dictionary listing the parameters `level`, `msg`
17 and `args` which are going to be used for constructing the log message when
18 calling logging.log.
19
20 Dictionary keys for the method outputs:
21 * `level` should be the log level for that action, you can use those
22 from the python logging library: logging.DEBUG, logging.INFO,
23 logging.WARNING, logging.ERROR and logging.CRITICAL.
24
25 * `msg` should be a string that can contain different formatting
26 placeholders. This string, formatted with the provided `args`, is going
27 to be the log message for that action.
28
29 * `args` should be a tuple or dict with the formatting placeholders for
30 `msg`. The final log message is computed as output['msg'] %
31 output['args'].
32 """
33
34 def crawled(self, request, response, spider):
35 request_flags = ' %s' % str(request.flags) if request.flags else ''
36 response_flags = ' %s' % str(response.flags) if response.flags else ''
37 return {
38 'level': logging.DEBUG,
39 'msg': CRAWLEDMSG,
40 'args': {
41 'status': response.status,
42 'request': request,
43 'request_flags' : request_flags,
44 'referer': referer_str(request),
45 'response_flags': response_flags,
46 }
47 }
48
49 def scraped(self, item, response, spider):
50 if isinstance(response, Failure):
51 src = response.getErrorMessage()
52 else:
53 src = response
54 return {
55 'level': logging.DEBUG,
56 'msg': SCRAPEDMSG,
57 'args': {
58 'src': src,
59 'item': item,
60 }
61 }
62
63 def dropped(self, item, exception, response, spider):
64 return {
65 'level': logging.WARNING,
66 'msg': DROPPEDMSG,
67 'args': {
68 'exception': exception,
69 'item': item,
70 }
71 }
72
73 @classmethod
74 def from_crawler(cls, crawler):
75 return cls()
76
[end of scrapy/logformatter.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py
--- a/scrapy/logformatter.py
+++ b/scrapy/logformatter.py
@@ -43,6 +43,8 @@
'request_flags' : request_flags,
'referer': referer_str(request),
'response_flags': response_flags,
+ # backward compatibility with Scrapy logformatter below 1.4 version
+ 'flags': response_flags
}
}
| {"golden_diff": "diff --git a/scrapy/logformatter.py b/scrapy/logformatter.py\n--- a/scrapy/logformatter.py\n+++ b/scrapy/logformatter.py\n@@ -43,6 +43,8 @@\n 'request_flags' : request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n+ # backward compatibility with Scrapy logformatter below 1.4 version\n+ 'flags': response_flags\n }\n }\n", "issue": "After adding request flags subclasses of logformatter that rely on 'flags' format string are broken\n#2082 added flags to request but it also renamed formatting string key from flags to response_flags/request_flags\r\n```\r\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s (referer: %(referer)s)%(flags)s\"\r\n +CRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\" \r\n```\r\n\r\nScrapy allows you to override logformatter and this is what I have in my project. I have logformatter looking rouhgly like this\r\n\r\n\r\n```python\r\n# dirbot/logf.py\r\nfrom scrapy.logformatter import LogFormatter\r\n\r\n\r\nclass CustomLogFormatter(LogFormatter):\r\n def crawled(self, request, response, spider):\r\n kwargs = super(CustomLogFormatter, self).crawled(\r\n request, response, spider)\r\n kwargs['msg'] = (\r\n u\"Crawled (%(status)s) %(request)s \"\r\n u\"(referer: %(referer)s, latency: %(latency).2f s)%(flags)s\"\r\n )\r\n kwargs['args']['latency'] = response.meta.get('download_latency', 0)\r\n return kwargs\r\n```\r\n\r\nnow if you enable it in settings `LOG_FORMATTER = 'dirbot.logf.CustomLogFormatter'\r\n` and try to run it with recent master you'll get KeyError\r\n\r\n```\r\n2017-03-13 14:15:26 [scrapy.extensions.telnet] DEBUG: Telnet console listening on 127.0.0.1:6023\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 851, in emit\r\n msg = self.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 724, in format\r\n return fmt.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 464, in format\r\n record.message = record.getMessage()\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 328, in getMessage\r\n msg = msg % self.args\r\nKeyError: u'flags'\r\nLogged from file engine.py, line 238\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 851, in emit\r\n msg = self.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 724, in format\r\n return fmt.format(record)\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 464, in format\r\n record.message = record.getMessage()\r\n File \"/usr/lib/python2.7/logging/__init__.py\", line 328, in getMessage\r\n msg = msg % self.args\r\nKeyError: u'flags'\r\nLogged from file engine.py, line 238\r\n2017-03-13 14:15:27 [scrapy.core.scraper] DEBUG: Scraped from <200 http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/>\r\n```\r\n\r\nSo this change that renamed `flags` to `response_flags/request_flags` seems backward incompatible. 
\n", "before_files": [{"content": "import os\nimport logging\n\nfrom twisted.python.failure import Failure\n\nfrom scrapy.utils.request import referer_str\n\nSCRAPEDMSG = u\"Scraped from %(src)s\" + os.linesep + \"%(item)s\"\nDROPPEDMSG = u\"Dropped: %(exception)s\" + os.linesep + \"%(item)s\"\nCRAWLEDMSG = u\"Crawled (%(status)s) %(request)s%(request_flags)s (referer: %(referer)s)%(response_flags)s\"\n\n\nclass LogFormatter(object):\n \"\"\"Class for generating log messages for different actions.\n\n All methods must return a dictionary listing the parameters `level`, `msg`\n and `args` which are going to be used for constructing the log message when\n calling logging.log.\n\n Dictionary keys for the method outputs:\n * `level` should be the log level for that action, you can use those\n from the python logging library: logging.DEBUG, logging.INFO,\n logging.WARNING, logging.ERROR and logging.CRITICAL.\n\n * `msg` should be a string that can contain different formatting\n placeholders. This string, formatted with the provided `args`, is going\n to be the log message for that action.\n\n * `args` should be a tuple or dict with the formatting placeholders for\n `msg`. The final log message is computed as output['msg'] %\n output['args'].\n \"\"\"\n\n def crawled(self, request, response, spider):\n request_flags = ' %s' % str(request.flags) if request.flags else ''\n response_flags = ' %s' % str(response.flags) if response.flags else ''\n return {\n 'level': logging.DEBUG,\n 'msg': CRAWLEDMSG,\n 'args': {\n 'status': response.status,\n 'request': request,\n 'request_flags' : request_flags,\n 'referer': referer_str(request),\n 'response_flags': response_flags,\n }\n }\n\n def scraped(self, item, response, spider):\n if isinstance(response, Failure):\n src = response.getErrorMessage()\n else:\n src = response\n return {\n 'level': logging.DEBUG,\n 'msg': SCRAPEDMSG,\n 'args': {\n 'src': src,\n 'item': item,\n }\n }\n\n def dropped(self, item, exception, response, spider):\n return {\n 'level': logging.WARNING,\n 'msg': DROPPEDMSG,\n 'args': {\n 'exception': exception,\n 'item': item,\n }\n }\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls()\n", "path": "scrapy/logformatter.py"}]} | 1,971 | 99 |
gh_patches_debug_15426 | rasdani/github-patches | git_diff | airctic__icevision-734 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Can't save a full model using torch.save (at least with faster-RCNN)
It is not possible to save a full model using default settings of `torch.save` (see stack trace below). This is because of the implementation of `remove_internal_model_transforms`, which uses inner functions in its implementation. The default pickle module does not support inner functions.
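The limitation is easy to reproduce on its own: `pickle` serializes functions by qualified name, and a function defined inside another function has no importable name. A minimal sketch, independent of icevision:

```python
import pickle


def make_noop():
    def noop(x):  # qualname is 'make_noop.<locals>.noop', not importable
        return x
    return noop


pickle.dumps(make_noop())
# AttributeError: Can't pickle local object 'make_noop.<locals>.noop'
```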
Workaround: use the `dill` module instead, which does support inner functions.
Suggested fix: it does not look as if the inner functions are necessary. If they were moved to ordinary module-level functions, then the default pickle module should work.
`torch.save(model, 'mod.pth', pickle_module=pickle)` causes an error.
`torch.save(model, 'mod.pth', pickle_module=dill)` is a workaround.
**To Reproduce**
`torch.save(model, 'mod1-full.pth', pickle_module=pickle)`
results in:
```python
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-12-50f3761f4f3c> in <module>
----> 1 torch.save(model, 'mod1-full.pth', pickle_module=pickle)
~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in save(obj, f, pickle_module, pickle_protocol, _use_new_zipfile_serialization)
370 if _use_new_zipfile_serialization:
371 with _open_zipfile_writer(opened_file) as opened_zipfile:
--> 372 _save(obj, opened_zipfile, pickle_module, pickle_protocol)
373 return
374 _legacy_save(obj, opened_file, pickle_module, pickle_protocol)
~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in _save(obj, zip_file, pickle_module, pickle_protocol)
474 pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol)
475 pickler.persistent_id = persistent_id
--> 476 pickler.dump(obj)
477 data_value = data_buf.getvalue()
478 zip_file.write_record('data.pkl', data_value, len(data_value))
AttributeError: Can't pickle local object 'remove_internal_model_transforms.<locals>.noop_normalize'
```
Relevant definition:
```
def remove_internal_model_transforms(model: GeneralizedRCNN):
def noop_normalize(image: Tensor) -> Tensor:
return image
def noop_resize(
image: Tensor, target: Optional[Dict[str, Tensor]]
) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
return image, target
model.transform.normalize = noop_normalize
model.transform.resize = noop_resize
```
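Hoisting those helpers to module level would let the default pickler reference them by name. A sketch of what the suggested fix could look like (type names as imported in the original module; the underscore-prefixed helper names are illustrative):

```python
def _noop_normalize(image: Tensor) -> Tensor:
    return image


def _noop_resize(
    image: Tensor, target: Optional[Dict[str, Tensor]]
) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
    return image, target


def remove_internal_model_transforms(model: GeneralizedRCNN):
    model.transform.normalize = _noop_normalize
    model.transform.resize = _noop_resize
```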
</issue>
<code>
[start of icevision/models/torchvision/utils.py]
1 __all__ = [
2 "remove_internal_model_transforms",
3 "patch_rcnn_param_groups",
4 "patch_retinanet_param_groups",
5 ]
6
7 from icevision.imports import *
8 from icevision.utils import *
9 from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN
10
11
12 def remove_internal_model_transforms(model: GeneralizedRCNN):
13 def noop_normalize(image: Tensor) -> Tensor:
14 return image
15
16 def noop_resize(
17 image: Tensor, target: Optional[Dict[str, Tensor]]
18 ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
19 return image, target
20
21 model.transform.normalize = noop_normalize
22 model.transform.resize = noop_resize
23
24
25 def patch_param_groups(
26 model: nn.Module,
27 head_layers: List[nn.Module],
28 backbone_param_groups: List[List[nn.Parameter]],
29 ):
30 def param_groups(model: nn.Module) -> List[List[nn.Parameter]]:
31 head_param_groups = [list(layer.parameters()) for layer in head_layers]
32
33 _param_groups = backbone_param_groups + head_param_groups
34 check_all_model_params_in_groups2(model, _param_groups)
35
36 return _param_groups
37
38 model.param_groups = MethodType(param_groups, model)
39
40
41 def patch_rcnn_param_groups(model: nn.Module):
42 return patch_param_groups(
43 model=model,
44 head_layers=[model.rpn, model.roi_heads],
45 backbone_param_groups=model.backbone.param_groups(),
46 )
47
48
49 def patch_retinanet_param_groups(model: nn.Module):
50 return patch_param_groups(
51 model=model,
52 head_layers=[model.head],
53 backbone_param_groups=model.backbone.param_groups(),
54 )
55
[end of icevision/models/torchvision/utils.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/icevision/models/torchvision/utils.py b/icevision/models/torchvision/utils.py
--- a/icevision/models/torchvision/utils.py
+++ b/icevision/models/torchvision/utils.py
@@ -9,17 +9,19 @@
from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN
-def remove_internal_model_transforms(model: GeneralizedRCNN):
- def noop_normalize(image: Tensor) -> Tensor:
- return image
+def _noop_normalize(image: Tensor) -> Tensor:
+ return image
+
- def noop_resize(
- image: Tensor, target: Optional[Dict[str, Tensor]]
- ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
- return image, target
+def _noop_resize(
+ image: Tensor, target: Optional[Dict[str, Tensor]]
+) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
+ return image, target
- model.transform.normalize = noop_normalize
- model.transform.resize = noop_resize
+
+def remove_internal_model_transforms(model: GeneralizedRCNN):
+ model.transform.normalize = _noop_normalize
+ model.transform.resize = _noop_resize
def patch_param_groups(
| {"golden_diff": "diff --git a/icevision/models/torchvision/utils.py b/icevision/models/torchvision/utils.py\n--- a/icevision/models/torchvision/utils.py\n+++ b/icevision/models/torchvision/utils.py\n@@ -9,17 +9,19 @@\n from torchvision.models.detection.generalized_rcnn import GeneralizedRCNN\n \n \n-def remove_internal_model_transforms(model: GeneralizedRCNN):\n- def noop_normalize(image: Tensor) -> Tensor:\n- return image\n+def _noop_normalize(image: Tensor) -> Tensor:\n+ return image\n+\n \n- def noop_resize(\n- image: Tensor, target: Optional[Dict[str, Tensor]]\n- ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\n- return image, target\n+def _noop_resize(\n+ image: Tensor, target: Optional[Dict[str, Tensor]]\n+) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\n+ return image, target\n \n- model.transform.normalize = noop_normalize\n- model.transform.resize = noop_resize\n+\n+def remove_internal_model_transforms(model: GeneralizedRCNN):\n+ model.transform.normalize = _noop_normalize\n+ model.transform.resize = _noop_resize\n \n \n def patch_param_groups(\n", "issue": "Can't save a full model using torch.save (at least with faster-RCNN)\nIt is not possible to save a full model using default settings of `torch.save` (see stack trace below). This is because of the implementation of `remove_internal_model_transforms`, which uses inner functions in its implementation. The default pickle module does not support inner functions.\r\n\r\nWorkaround: use the `dill` module instead, which does support inner functions.\r\n\r\nSuggested fix: It does not look as if the internal functions are necessary. If there were moved to standard functions, then the default pickle module should work.\r\n`torch.save(model, 'mod.pth', pickle_module=pickle)` causes an error.\r\n\r\n`torch.save(model, 'mod.pth', pickle_module=dill)` is a workaround.\r\n\r\n**To Reproduce**\r\n\r\n`torch.save(model, 'mod1-full.pth', pickle_module=pickle)`\r\nresults in:\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-12-50f3761f4f3c> in <module>\r\n----> 1 torch.save(model, 'mod1-full.pth', pickle_module=pickle)\r\n\r\n~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in save(obj, f, pickle_module, pickle_protocol, _use_new_zipfile_serialization)\r\n 370 if _use_new_zipfile_serialization:\r\n 371 with _open_zipfile_writer(opened_file) as opened_zipfile:\r\n--> 372 _save(obj, opened_zipfile, pickle_module, pickle_protocol)\r\n 373 return\r\n 374 _legacy_save(obj, opened_file, pickle_module, pickle_protocol)\r\n\r\n~/anaconda3/envs/dlm/lib/python3.8/site-packages/torch/serialization.py in _save(obj, zip_file, pickle_module, pickle_protocol)\r\n 474 pickler = pickle_module.Pickler(data_buf, protocol=pickle_protocol)\r\n 475 pickler.persistent_id = persistent_id\r\n--> 476 pickler.dump(obj)\r\n 477 data_value = data_buf.getvalue()\r\n 478 zip_file.write_record('data.pkl', data_value, len(data_value))\r\n\r\nAttributeError: Can't pickle local object 'remove_internal_model_transforms.<locals>.noop_normalize'\r\n```\r\n\r\nRelevant definition:\r\n```\r\ndef remove_internal_model_transforms(model: GeneralizedRCNN):\r\n def noop_normalize(image: Tensor) -> Tensor:\r\n return image\r\n\r\n def noop_resize(\r\n image: Tensor, target: Optional[Dict[str, Tensor]]\r\n ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\r\n return image, target\r\n\r\n model.transform.normalize = noop_normalize\r\n 
model.transform.resize = noop_resize\r\n```\r\n\r\n\n", "before_files": [{"content": "__all__ = [\n \"remove_internal_model_transforms\",\n \"patch_rcnn_param_groups\",\n \"patch_retinanet_param_groups\",\n]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom torchvision.models.detection.generalized_rcnn import GeneralizedRCNN\n\n\ndef remove_internal_model_transforms(model: GeneralizedRCNN):\n def noop_normalize(image: Tensor) -> Tensor:\n return image\n\n def noop_resize(\n image: Tensor, target: Optional[Dict[str, Tensor]]\n ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:\n return image, target\n\n model.transform.normalize = noop_normalize\n model.transform.resize = noop_resize\n\n\ndef patch_param_groups(\n model: nn.Module,\n head_layers: List[nn.Module],\n backbone_param_groups: List[List[nn.Parameter]],\n):\n def param_groups(model: nn.Module) -> List[List[nn.Parameter]]:\n head_param_groups = [list(layer.parameters()) for layer in head_layers]\n\n _param_groups = backbone_param_groups + head_param_groups\n check_all_model_params_in_groups2(model, _param_groups)\n\n return _param_groups\n\n model.param_groups = MethodType(param_groups, model)\n\n\ndef patch_rcnn_param_groups(model: nn.Module):\n return patch_param_groups(\n model=model,\n head_layers=[model.rpn, model.roi_heads],\n backbone_param_groups=model.backbone.param_groups(),\n )\n\n\ndef patch_retinanet_param_groups(model: nn.Module):\n return patch_param_groups(\n model=model,\n head_layers=[model.head],\n backbone_param_groups=model.backbone.param_groups(),\n )\n", "path": "icevision/models/torchvision/utils.py"}]} | 1,595 | 270 |
gh_patches_debug_40489 | rasdani/github-patches | git_diff | mindsdb__lightwood-979 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Bug in SHAP analysis block
Trying out this module, the following error arises with the cloud tutorial for home rentals:
```python
File "/tmp/e74830c1ef5914dfafe52537b8b33cd0479265f508bfd6c616614666305205822.py", line 392, in predict
insights, global_insights = explain(data=data,encoded_data=encoded_data,predictions=df,ts_analysis=None,problem_definition=self.problem_definition,stat_analysis=self.statistical_analysis,runtime_analysis=self.runtime_analyzer,target_name=self.target,target_dtype=self.dtype_dict[self.target],explainer_blocks=self.analysis_blocks,pred_args=self.pred_args)
File "/MindsDB/lightwood/lightwood/analysis/explain.py", line 81, in explain
row_insights, global_insights = block.explain(row_insights, global_insights, **kwargs)
File "/MindsDB/lightwood/lightwood/analysis/helpers/shap.py", line 79, in explain
shap_values = shap_explainer.shap_values(ns.data, silent=True)
File "/MindsDB/nenv/lib/python3.8/site-packages/shap/explainers/_kernel.py", line 186, in shap_values
explanations.append(self.explain(data, **kwargs))
File "/MindsDB/nenv/lib/python3.8/site-packages/shap/explainers/_kernel.py", line 207, in explain
match_instance_to_data(instance, self.data)
File "/MindsDB/nenv/lib/python3.8/site-packages/shap/utils/_legacy.py", line 87, in match_instance_to_data
instance.group_display_values = [instance.x[0, group[0]] if len(group) == 1 else "" for group in data.groups]
File "/MindsDB/nenv/lib/python3.8/site-packages/shap/utils/_legacy.py", line 87, in <listcomp>
instance.group_display_values = [instance.x[0, group[0]] if len(group) == 1 else "" for group in data.groups]
IndexError: index 7 is out of bounds for axis 1 with size 7
```
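The last frame is the giveaway: the explainer's background data is built from `train_data.data_frame`, which still contains the target column, while the rows handed to `shap_values` at prediction time only carry the 7 input features, hence index 7 being out of range. Keeping both sides restricted to the same feature columns is the essence of a fix; a sketch, assuming `dtype_dict`, `target`, `model`, `train_data` and `data` as in the module listed below:

```python
# fit and explain on the same feature columns, excluding the target
columns = [c for c in dtype_dict if c != target]

explainer = shap.KernelExplainer(model=model, data=train_data.data_frame[columns])
shap_values = explainer.shap_values(data[columns], silent=True)
```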
</issue>
<code>
[start of lightwood/analysis/helpers/shap.py]
1 import warnings
2 from types import SimpleNamespace
3 from typing import Dict, Optional, Tuple
4
5 import numpy as np
6 import pandas as pd
7 from lightwood.analysis.base import BaseAnalysisBlock
8 from lightwood.api import dtype
9 from lightwood.api.types import PredictionArguments
10 from lightwood.data.encoded_ds import EncodedDs
11 from lightwood.helpers.log import log
12 from sklearn.preprocessing import LabelEncoder
13
14 import shap
15
16
17 class ShapleyValues(BaseAnalysisBlock):
18 """
19 Analysis block that estimates column importance with SHAP (SHapley Additive exPlanations), a game theoretic approach
20     to explain the output of any machine learning model. SHAP assigns each feature an importance value for a particular
21 prediction.
22
23 Reference:
24 https://shap.readthedocs.io/en/stable/
25 https://proceedings.neurips.cc/paper/2017/file/8a20a8621978632d76c43dfd28b67767-Paper.pdf
26 """
27 label_encoder: LabelEncoder
28
29 def __init__(self, deps: Optional[Tuple] = ...):
30 super().__init__(deps=deps)
31 self.label_encoder = LabelEncoder()
32
33 def analyze(self, info: Dict[str, object], **kwargs) -> Dict[str, object]:
34 log.info('Preparing to compute feature importance values with SHAP')
35 ns = SimpleNamespace(**kwargs)
36
37 output_dtype = ns.dtype_dict[ns.target]
38 train_data: EncodedDs = ns.train_data
39
40 if output_dtype in (dtype.integer, dtype.float, dtype.quantity):
41 pass
42 elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):
43 self.label_encoder.fit(train_data.data_frame[ns.target].values)
44 else:
45 log.warning(f'ShapleyValues analyzers not supported for type: {output_dtype}')
46 return info
47
48 def model(x: np.ndarray) -> np.ndarray:
49 assert(isinstance(x, np.ndarray))
50 df = pd.DataFrame(data=x, columns=train_data.data_frame.columns)
51 ds = EncodedDs(encoders=train_data.encoders, data_frame=df, target=train_data.target)
52
53 decoded_predictions = ns.predictor(ds=ds, args=PredictionArguments())
54 if output_dtype in (dtype.integer, dtype.float, dtype.quantity):
55 encoded_predictions = decoded_predictions['prediction'].values
56 elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):
57 encoded_predictions = self.label_encoder.transform(decoded_predictions['prediction'].values)
58
59 return encoded_predictions
60
61 info['shap_explainer'] = shap.KernelExplainer(model=model, data=train_data.data_frame)
62
63 return info
64
65 def explain(self,
66 row_insights: pd.DataFrame,
67 global_insights: Dict[str, object],
68 **kwargs
69 ) -> Tuple[pd.DataFrame, Dict[str, object]]:
70 log.info('Computing feature importance values with Kernel SHAP method')
71 ns = SimpleNamespace(**kwargs)
72
73 shap_explainer = ns.analysis.get('shap_explainer', None)
74 if shap_explainer is None:
75 return row_insights, global_insights
76
77 with warnings.catch_warnings():
78 warnings.filterwarnings("ignore", category=DeprecationWarning)
79 shap_values = shap_explainer.shap_values(ns.data, silent=True)
80
81 shap_values_df = pd.DataFrame(shap_values).rename(
82 mapper=lambda i: f"feature_{i}_impact", axis='columns')
83
84 if kwargs.get('target_dtype', None) in (dtype.binary, dtype.categorical, dtype.tags):
85 predictions = self.label_encoder.transform(row_insights['prediction'])
86 else:
87 predictions = row_insights['prediction']
88
89 base_response = (predictions - shap_values_df.sum(axis='columns')).mean()
90 global_insights['base_response'] = base_response
91
92 row_insights = pd.concat([row_insights, shap_values_df], axis='columns')
93
94 return row_insights, global_insights
95
[end of lightwood/analysis/helpers/shap.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/lightwood/analysis/helpers/shap.py b/lightwood/analysis/helpers/shap.py
--- a/lightwood/analysis/helpers/shap.py
+++ b/lightwood/analysis/helpers/shap.py
@@ -29,6 +29,8 @@
def __init__(self, deps: Optional[Tuple] = ...):
super().__init__(deps=deps)
self.label_encoder = LabelEncoder()
+ self.columns = []
+ self.target = None
def analyze(self, info: Dict[str, object], **kwargs) -> Dict[str, object]:
log.info('Preparing to compute feature importance values with SHAP')
@@ -45,9 +47,13 @@
log.warning(f'ShapleyValues analyzers not supported for type: {output_dtype}')
return info
+ self.target = ns.target
+ self.columns = list(set(ns.dtype_dict.keys()) - {self.target})
+ input_df = train_data.data_frame[self.columns]
+
def model(x: np.ndarray) -> np.ndarray:
assert(isinstance(x, np.ndarray))
- df = pd.DataFrame(data=x, columns=train_data.data_frame.columns)
+ df = pd.DataFrame(data=x, columns=self.columns)
ds = EncodedDs(encoders=train_data.encoders, data_frame=df, target=train_data.target)
decoded_predictions = ns.predictor(ds=ds, args=PredictionArguments())
@@ -58,7 +64,7 @@
return encoded_predictions
- info['shap_explainer'] = shap.KernelExplainer(model=model, data=train_data.data_frame)
+ info['shap_explainer'] = shap.KernelExplainer(model=model, data=input_df)
return info
@@ -76,10 +82,10 @@
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
- shap_values = shap_explainer.shap_values(ns.data, silent=True)
+ shap_values = shap_explainer.shap_values(ns.data[self.columns], silent=True)
shap_values_df = pd.DataFrame(shap_values).rename(
- mapper=lambda i: f"feature_{i}_impact", axis='columns')
+ mapper=lambda i: f"shap_contribution_{self.columns[i]}", axis='columns')
if kwargs.get('target_dtype', None) in (dtype.binary, dtype.categorical, dtype.tags):
predictions = self.label_encoder.transform(row_insights['prediction'])
@@ -87,8 +93,9 @@
predictions = row_insights['prediction']
base_response = (predictions - shap_values_df.sum(axis='columns')).mean()
- global_insights['base_response'] = base_response
row_insights = pd.concat([row_insights, shap_values_df], axis='columns')
+ row_insights['shap_base_response'] = base_response
+ row_insights['shap_final_response'] = predictions
return row_insights, global_insights
| {"golden_diff": "diff --git a/lightwood/analysis/helpers/shap.py b/lightwood/analysis/helpers/shap.py\n--- a/lightwood/analysis/helpers/shap.py\n+++ b/lightwood/analysis/helpers/shap.py\n@@ -29,6 +29,8 @@\n def __init__(self, deps: Optional[Tuple] = ...):\n super().__init__(deps=deps)\n self.label_encoder = LabelEncoder()\n+ self.columns = []\n+ self.target = None\n \n def analyze(self, info: Dict[str, object], **kwargs) -> Dict[str, object]:\n log.info('Preparing to compute feature importance values with SHAP')\n@@ -45,9 +47,13 @@\n log.warning(f'ShapleyValues analyzers not supported for type: {output_dtype}')\n return info\n \n+ self.target = ns.target\n+ self.columns = list(set(ns.dtype_dict.keys()) - {self.target})\n+ input_df = train_data.data_frame[self.columns]\n+\n def model(x: np.ndarray) -> np.ndarray:\n assert(isinstance(x, np.ndarray))\n- df = pd.DataFrame(data=x, columns=train_data.data_frame.columns)\n+ df = pd.DataFrame(data=x, columns=self.columns)\n ds = EncodedDs(encoders=train_data.encoders, data_frame=df, target=train_data.target)\n \n decoded_predictions = ns.predictor(ds=ds, args=PredictionArguments())\n@@ -58,7 +64,7 @@\n \n return encoded_predictions\n \n- info['shap_explainer'] = shap.KernelExplainer(model=model, data=train_data.data_frame)\n+ info['shap_explainer'] = shap.KernelExplainer(model=model, data=input_df)\n \n return info\n \n@@ -76,10 +82,10 @@\n \n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n- shap_values = shap_explainer.shap_values(ns.data, silent=True)\n+ shap_values = shap_explainer.shap_values(ns.data[self.columns], silent=True)\n \n shap_values_df = pd.DataFrame(shap_values).rename(\n- mapper=lambda i: f\"feature_{i}_impact\", axis='columns')\n+ mapper=lambda i: f\"shap_contribution_{self.columns[i]}\", axis='columns')\n \n if kwargs.get('target_dtype', None) in (dtype.binary, dtype.categorical, dtype.tags):\n predictions = self.label_encoder.transform(row_insights['prediction'])\n@@ -87,8 +93,9 @@\n predictions = row_insights['prediction']\n \n base_response = (predictions - shap_values_df.sum(axis='columns')).mean()\n- global_insights['base_response'] = base_response\n \n row_insights = pd.concat([row_insights, shap_values_df], axis='columns')\n+ row_insights['shap_base_response'] = base_response\n+ row_insights['shap_final_response'] = predictions\n \n return row_insights, global_insights\n", "issue": "Bug in SHAP analysis block\nTrying out this module, the following error arises with the cloud tutorial for home rentals:\r\n\r\n```python\r\n File \"/tmp/e74830c1ef5914dfafe52537b8b33cd0479265f508bfd6c616614666305205822.py\", line 392, in predict\r\n insights, global_insights = explain(data=data,encoded_data=encoded_data,predictions=df,ts_analysis=None,problem_definition=self.problem_definition,stat_analysis=self.statistical_analysis,runtime_analysis=self.runtime_analyzer,target_name=self.target,target_dtype=self.dtype_dict[self.target],explainer_blocks=self.analysis_blocks,pred_args=self.pred_args)\r\n File \"/MindsDB/lightwood/lightwood/analysis/explain.py\", line 81, in explain\r\n row_insights, global_insights = block.explain(row_insights, global_insights, **kwargs)\r\n File \"/MindsDB/lightwood/lightwood/analysis/helpers/shap.py\", line 79, in explain\r\n shap_values = shap_explainer.shap_values(ns.data, silent=True)\r\n File \"/MindsDB/nenv/lib/python3.8/site-packages/shap/explainers/_kernel.py\", line 186, in shap_values\r\n explanations.append(self.explain(data, **kwargs))\r\n 
File \"/MindsDB/nenv/lib/python3.8/site-packages/shap/explainers/_kernel.py\", line 207, in explain\r\n match_instance_to_data(instance, self.data)\r\n File \"/MindsDB/nenv/lib/python3.8/site-packages/shap/utils/_legacy.py\", line 87, in match_instance_to_data\r\n instance.group_display_values = [instance.x[0, group[0]] if len(group) == 1 else \"\" for group in data.groups]\r\n File \"/MindsDB/nenv/lib/python3.8/site-packages/shap/utils/_legacy.py\", line 87, in <listcomp>\r\n instance.group_display_values = [instance.x[0, group[0]] if len(group) == 1 else \"\" for group in data.groups]\r\nIndexError: index 7 is out of bounds for axis 1 with size 7\r\n\r\n```\n", "before_files": [{"content": "import warnings\nfrom types import SimpleNamespace\nfrom typing import Dict, Optional, Tuple\n\nimport numpy as np\nimport pandas as pd\nfrom lightwood.analysis.base import BaseAnalysisBlock\nfrom lightwood.api import dtype\nfrom lightwood.api.types import PredictionArguments\nfrom lightwood.data.encoded_ds import EncodedDs\nfrom lightwood.helpers.log import log\nfrom sklearn.preprocessing import LabelEncoder\n\nimport shap\n\n\nclass ShapleyValues(BaseAnalysisBlock):\n \"\"\"\n Analysis block that estimates column importance with SHAP (SHapley Additive exPlanations), a game theoretic approach\n to explain the ouput of any machine learning model. SHAP assigns each feature an importance value for a particular\n prediction.\n\n Reference:\n https://shap.readthedocs.io/en/stable/\n https://proceedings.neurips.cc/paper/2017/file/8a20a8621978632d76c43dfd28b67767-Paper.pdf\n \"\"\"\n label_encoder: LabelEncoder\n\n def __init__(self, deps: Optional[Tuple] = ...):\n super().__init__(deps=deps)\n self.label_encoder = LabelEncoder()\n\n def analyze(self, info: Dict[str, object], **kwargs) -> Dict[str, object]:\n log.info('Preparing to compute feature importance values with SHAP')\n ns = SimpleNamespace(**kwargs)\n\n output_dtype = ns.dtype_dict[ns.target]\n train_data: EncodedDs = ns.train_data\n\n if output_dtype in (dtype.integer, dtype.float, dtype.quantity):\n pass\n elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):\n self.label_encoder.fit(train_data.data_frame[ns.target].values)\n else:\n log.warning(f'ShapleyValues analyzers not supported for type: {output_dtype}')\n return info\n\n def model(x: np.ndarray) -> np.ndarray:\n assert(isinstance(x, np.ndarray))\n df = pd.DataFrame(data=x, columns=train_data.data_frame.columns)\n ds = EncodedDs(encoders=train_data.encoders, data_frame=df, target=train_data.target)\n\n decoded_predictions = ns.predictor(ds=ds, args=PredictionArguments())\n if output_dtype in (dtype.integer, dtype.float, dtype.quantity):\n encoded_predictions = decoded_predictions['prediction'].values\n elif output_dtype in (dtype.binary, dtype.categorical, dtype.tags):\n encoded_predictions = self.label_encoder.transform(decoded_predictions['prediction'].values)\n\n return encoded_predictions\n\n info['shap_explainer'] = shap.KernelExplainer(model=model, data=train_data.data_frame)\n\n return info\n\n def explain(self,\n row_insights: pd.DataFrame,\n global_insights: Dict[str, object],\n **kwargs\n ) -> Tuple[pd.DataFrame, Dict[str, object]]:\n log.info('Computing feature importance values with Kernel SHAP method')\n ns = SimpleNamespace(**kwargs)\n\n shap_explainer = ns.analysis.get('shap_explainer', None)\n if shap_explainer is None:\n return row_insights, global_insights\n\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", 
category=DeprecationWarning)\n shap_values = shap_explainer.shap_values(ns.data, silent=True)\n\n shap_values_df = pd.DataFrame(shap_values).rename(\n mapper=lambda i: f\"feature_{i}_impact\", axis='columns')\n\n if kwargs.get('target_dtype', None) in (dtype.binary, dtype.categorical, dtype.tags):\n predictions = self.label_encoder.transform(row_insights['prediction'])\n else:\n predictions = row_insights['prediction']\n\n base_response = (predictions - shap_values_df.sum(axis='columns')).mean()\n global_insights['base_response'] = base_response\n\n row_insights = pd.concat([row_insights, shap_values_df], axis='columns')\n\n return row_insights, global_insights\n", "path": "lightwood/analysis/helpers/shap.py"}]} | 2,086 | 653 |
gh_patches_debug_44330 | rasdani/github-patches | git_diff | litestar-org__litestar-1678 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
</issue>
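For context, here is a minimal sketch of the constraint the report describes. It uses a plain pydantic model rather than Starlite's actual ``StaticFilesConfig``, and ``my_package`` is a placeholder package name, so treat it as an illustration only:

```python
# Illustration only: "my_package" is a hypothetical package shipping static files.
from importlib_resources import files  # backport of importlib.resources
from pydantic import BaseModel, DirectoryPath

static_root = files("my_package") / "static"  # Traversable; may live inside a zip/wheel
print(static_root.is_dir())  # True for the virtual directory tree


class Config(BaseModel):
    directory: DirectoryPath  # validated against the local filesystem


try:
    Config(directory=str(static_root))
except Exception as exc:  # pydantic.ValidationError when the package is zipped
    print(exc)  # DirectoryPath rejects paths that do not exist on disk
```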
<code>
[start of litestar/datastructures/state.py]
1 from __future__ import annotations
2
3 from copy import copy, deepcopy
4 from threading import RLock
5 from typing import Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping
6
7 __all__ = ("ImmutableState", "State")
8
9
10 class ImmutableState(Mapping[str, Any]):
11 """An object meant to store arbitrary state.
12
13 It can be accessed using dot notation while exposing dict like functionalities.
14 """
15
16 __slots__ = ("_state",)
17
18 _state: dict[str, Any]
19
20 def __init__(
21 self, state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]], deep_copy: bool = True
22 ) -> None:
23 """Initialize an ``ImmutableState`` instance.
24
25 Args:
26 state: An object to initialize the state from. Can be a dict, an instance of :class:`ImmutableState`, or a tuple
27 of key value paris.
28 deep_copy: Whether to 'deepcopy' the passed in state.
29
30 Examples:
31 .. code-block: python
32
33 from litestar.datastructures import ImmutableState
34
35 state_dict = {"first": 1, "second": 2, "third": 3, "fourth": 4}
36 state = ImmutableState(state_dict)
37
38 # state implements the Mapping type:
39 assert len(state) == 3
40 assert "first" in state
41 assert not "fourth" in state
42 assert state["first"] == 1
43 assert [(k, v) for k, v in state.items()] == [("first", 1), ("second", 2), ("third", 3)]
44
45 # state implements __bool__
46 assert state # state is true when it has values.
47 assert not State() # state is empty when it has no values.
48
49 # it has a 'dict' method to retrieve a shallow copy of the underlying dict
50 inner_dict = state.dict()
51 assert inner_dict == state_dict
52
53 # you can also retrieve a mutable State by calling 'mutable_copy'
54 mutable_state = state.mutable_copy()
55 del state["first"]
56 assert "first" not in state
57
58 """
59
60 if isinstance(state, ImmutableState):
61 state = state._state
62
63 if not isinstance(state, dict) and isinstance(state, Iterable):
64 state = dict(state)
65
66 super().__setattr__("_state", deepcopy(state) if deep_copy else state)
67
68 def __bool__(self) -> bool:
69 """Return a boolean indicating whether the wrapped dict instance has values."""
70 return bool(self._state)
71
72 def __getitem__(self, key: str) -> Any:
73 """Get the value for the corresponding key from the wrapped state object using subscription notation.
74
75 Args:
76 key: Key to access.
77
78 Raises:
79 KeyError
80
81 Returns:
82 A value from the wrapped state instance.
83 """
84 return self._state[key]
85
86 def __iter__(self) -> Iterator[str]:
87 """Return an iterator iterating the wrapped state dict.
88
89 Returns:
90 An iterator of strings
91 """
92 return iter(self._state)
93
94 def __len__(self) -> int:
95 """Return length of the wrapped state dict.
96
97 Returns:
98 An integer
99 """
100 return len(self._state)
101
102 def __getattr__(self, key: str) -> Any:
103 """Get the value for the corresponding key from the wrapped state object using attribute notation.
104
105 Args:
106 key: Key to retrieve
107
108 Raises:
109 AttributeError: if the given attribute is not set.
110
111 Returns:
112 The retrieved value
113 """
114 try:
115 return self._state[key]
116 except KeyError as e:
117 raise AttributeError from e
118
119 def __copy__(self) -> ImmutableState:
120 """Return a shallow copy of the given state object.
121
122 Customizes how the builtin "copy" function will work.
123 """
124 return self.__class__(deepcopy(self._state))
125
126 def mutable_copy(self) -> State:
127 """Return a mutable copy of the state object.
128
129 Returns:
130 A ``State``
131 """
132 return State(self._state)
133
134 def dict(self) -> dict[str, Any]:
135 """Return a shallow copy of the wrapped dict.
136
137 Returns:
138 A dict
139 """
140 return copy(self._state)
141
142 @classmethod
143 def __get_validators__(
144 cls,
145 ) -> Generator[Callable[[ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]], ImmutableState], None, None]: # type: ignore[valid-type]
146 """Pydantic compatible method to allow custom parsing of state instances in a SignatureModel."""
147 yield cls.validate
148
149 @classmethod
150 def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> ImmutableState: # type: ignore[valid-type]
151 """Parse a value and instantiate state inside a SignatureModel. This allows us to use custom subclasses of
152 state, as well as allows users to decide whether state is mutable or immutable.
153
154 Args:
155 value: The value from which to initialize the state instance.
156
157 Returns:
158 An ImmutableState instance
159 """
160 return cls(value)
161
162
163 class State(ImmutableState, MutableMapping[str, Any]):
164 """An object meant to store arbitrary state.
165
166 It can be accessed using dot notation while exposing dict like functionalities.
167 """
168
169 __slots__ = ("_lock",)
170
171 _lock: RLock
172
173 def __init__(
174 self,
175 state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]] | None = None,
176 deep_copy: bool = False,
177 ) -> None:
178 """Initialize a ``State`` instance with an optional value.
179
180 Args:
181 state: An object to initialize the state from. Can be a dict, an instance of 'ImmutableState', or a tuple of key value paris.
182 deep_copy: Whether to 'deepcopy' the passed in state.
183
184 Examples:
185 .. code-block: python
186
187 from litestar.datastructures import State
188
189 state_dict = {"first": 1, "second": 2, "third": 3, "fourth": 4}
190 state = State(state_dict)
191
192 # state can be accessed using '.' notation
193 assert state.fourth == 4
194 del state.fourth
195
196 # state implements the Mapping type:
197 assert len(state) == 3
198 assert "first" in state
199 assert not "fourth" in state
200 assert state["first"] == 1
201 assert [(k, v) for k, v in state.items()] == [("first", 1), ("second", 2), ("third", 3)]
202
203 state["fourth"] = 4
204 assert "fourth" in state
205 del state["fourth"]
206
207 # state implements __bool__
208 assert state # state is true when it has values.
209 assert not State() # state is empty when it has no values.
210
211 # it has shallow copy
212 copied_state = state.copy()
213 del copied_state.first
214 assert state.first
215
216 # it has a 'dict' method to retrieve a shallow copy of the underlying dict
217 inner_dict = state.dict()
218 assert inner_dict == state_dict
219
220 # you can get an immutable copy of the state by calling 'immutable_immutable_copy'
221 immutable_copy = state.immutable_copy()
222 del immutable_copy.first # raises AttributeError
223
224 """
225
226 super().__init__(state if state is not None else {}, deep_copy=deep_copy)
227 super().__setattr__("_lock", RLock())
228
229 def __delitem__(self, key: str) -> None:
230 """Delete the value from the key from the wrapped state object using subscription notation.
231
232 Args:
233 key: Key to delete
234
235 Raises:
236 KeyError: if the given attribute is not set.
237
238 Returns:
239 None
240 """
241
242 with self._lock:
243 del self._state[key]
244
245 def __setitem__(self, key: str, value: Any) -> None:
246 """Set an item in the state using subscription notation.
247
248 Args:
249 key: Key to set.
250 value: Value to set.
251
252 Returns:
253 None
254 """
255
256 with self._lock:
257 self._state[key] = value
258
259 def __setattr__(self, key: str, value: Any) -> None:
260 """Set an item in the state using attribute notation.
261
262 Args:
263 key: Key to set.
264 value: Value to set.
265
266 Returns:
267 None
268 """
269
270 with self._lock:
271 self._state[key] = value
272
273 def __delattr__(self, key: str) -> None:
274 """Delete the value from the key from the wrapped state object using attribute notation.
275
276 Args:
277 key: Key to delete
278
279 Raises:
280 AttributeError: if the given attribute is not set.
281
282 Returns:
283 None
284 """
285
286 try:
287 with self._lock:
288 del self._state[key]
289 except KeyError as e:
290 raise AttributeError from e
291
292 def copy(self) -> State:
293 """Return a shallow copy of the state object.
294
295 Returns:
296 A ``State``
297 """
298 return self.__class__(self.dict())
299
300 def immutable_copy(self) -> ImmutableState:
301 """Return a shallow copy of the state object, setting it to be frozen.
302
303 Returns:
304 A ``State``
305 """
306 return ImmutableState(self)
307
[end of litestar/datastructures/state.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/litestar/datastructures/state.py b/litestar/datastructures/state.py
--- a/litestar/datastructures/state.py
+++ b/litestar/datastructures/state.py
@@ -2,7 +2,10 @@
from copy import copy, deepcopy
from threading import RLock
-from typing import Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping
+from typing import TYPE_CHECKING, Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping
+
+if TYPE_CHECKING:
+ from typing_extensions import Self
__all__ = ("ImmutableState", "State")
@@ -13,7 +16,10 @@
It can be accessed using dot notation while exposing dict like functionalities.
"""
- __slots__ = ("_state",)
+ __slots__ = (
+ "_state",
+ "_deep_copy",
+ )
_state: dict[str, Any]
@@ -56,13 +62,13 @@
assert "first" not in state
"""
-
if isinstance(state, ImmutableState):
state = state._state
if not isinstance(state, dict) and isinstance(state, Iterable):
state = dict(state)
+ super().__setattr__("_deep_copy", deep_copy)
super().__setattr__("_state", deepcopy(state) if deep_copy else state)
def __bool__(self) -> bool:
@@ -116,12 +122,12 @@
except KeyError as e:
raise AttributeError from e
- def __copy__(self) -> ImmutableState:
+ def __copy__(self) -> Self:
"""Return a shallow copy of the given state object.
Customizes how the builtin "copy" function will work.
"""
- return self.__class__(deepcopy(self._state))
+ return self.__class__(self._state, deep_copy=self._deep_copy)
def mutable_copy(self) -> State:
"""Return a mutable copy of the state object.
@@ -129,7 +135,7 @@
Returns:
A ``State``
"""
- return State(self._state)
+ return State(self._state, deep_copy=self._deep_copy)
def dict(self) -> dict[str, Any]:
"""Return a shallow copy of the wrapped dict.
@@ -147,7 +153,7 @@
yield cls.validate
@classmethod
- def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> ImmutableState: # type: ignore[valid-type]
+ def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> Self: # type: ignore[valid-type]
"""Parse a value and instantiate state inside a SignatureModel. This allows us to use custom subclasses of
state, as well as allows users to decide whether state is mutable or immutable.
@@ -157,7 +163,8 @@
Returns:
An ImmutableState instance
"""
- return cls(value)
+ deep_copy = value._deep_copy if isinstance(value, ImmutableState) else False
+ return cls(value, deep_copy=deep_copy)
class State(ImmutableState, MutableMapping[str, Any]):
@@ -289,13 +296,13 @@
except KeyError as e:
raise AttributeError from e
- def copy(self) -> State:
+ def copy(self) -> Self:
"""Return a shallow copy of the state object.
Returns:
A ``State``
"""
- return self.__class__(self.dict())
+ return self.__class__(self.dict(), deep_copy=self._deep_copy)
def immutable_copy(self) -> ImmutableState:
"""Return a shallow copy of the state object, setting it to be frozen.
@@ -303,4 +310,4 @@
Returns:
A ``State``
"""
- return ImmutableState(self)
+ return ImmutableState(self, deep_copy=self._deep_copy)
| {"golden_diff": "diff --git a/litestar/datastructures/state.py b/litestar/datastructures/state.py\n--- a/litestar/datastructures/state.py\n+++ b/litestar/datastructures/state.py\n@@ -2,7 +2,10 @@\n \n from copy import copy, deepcopy\n from threading import RLock\n-from typing import Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping\n+from typing import TYPE_CHECKING, Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping\n+\n+if TYPE_CHECKING:\n+ from typing_extensions import Self\n \n __all__ = (\"ImmutableState\", \"State\")\n \n@@ -13,7 +16,10 @@\n It can be accessed using dot notation while exposing dict like functionalities.\n \"\"\"\n \n- __slots__ = (\"_state\",)\n+ __slots__ = (\n+ \"_state\",\n+ \"_deep_copy\",\n+ )\n \n _state: dict[str, Any]\n \n@@ -56,13 +62,13 @@\n assert \"first\" not in state\n \n \"\"\"\n-\n if isinstance(state, ImmutableState):\n state = state._state\n \n if not isinstance(state, dict) and isinstance(state, Iterable):\n state = dict(state)\n \n+ super().__setattr__(\"_deep_copy\", deep_copy)\n super().__setattr__(\"_state\", deepcopy(state) if deep_copy else state)\n \n def __bool__(self) -> bool:\n@@ -116,12 +122,12 @@\n except KeyError as e:\n raise AttributeError from e\n \n- def __copy__(self) -> ImmutableState:\n+ def __copy__(self) -> Self:\n \"\"\"Return a shallow copy of the given state object.\n \n Customizes how the builtin \"copy\" function will work.\n \"\"\"\n- return self.__class__(deepcopy(self._state))\n+ return self.__class__(self._state, deep_copy=self._deep_copy)\n \n def mutable_copy(self) -> State:\n \"\"\"Return a mutable copy of the state object.\n@@ -129,7 +135,7 @@\n Returns:\n A ``State``\n \"\"\"\n- return State(self._state)\n+ return State(self._state, deep_copy=self._deep_copy)\n \n def dict(self) -> dict[str, Any]:\n \"\"\"Return a shallow copy of the wrapped dict.\n@@ -147,7 +153,7 @@\n yield cls.validate\n \n @classmethod\n- def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> ImmutableState: # type: ignore[valid-type]\n+ def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> Self: # type: ignore[valid-type]\n \"\"\"Parse a value and instantiate state inside a SignatureModel. This allows us to use custom subclasses of\n state, as well as allows users to decide whether state is mutable or immutable.\n \n@@ -157,7 +163,8 @@\n Returns:\n An ImmutableState instance\n \"\"\"\n- return cls(value)\n+ deep_copy = value._deep_copy if isinstance(value, ImmutableState) else False\n+ return cls(value, deep_copy=deep_copy)\n \n \n class State(ImmutableState, MutableMapping[str, Any]):\n@@ -289,13 +296,13 @@\n except KeyError as e:\n raise AttributeError from e\n \n- def copy(self) -> State:\n+ def copy(self) -> Self:\n \"\"\"Return a shallow copy of the state object.\n \n Returns:\n A ``State``\n \"\"\"\n- return self.__class__(self.dict())\n+ return self.__class__(self.dict(), deep_copy=self._deep_copy)\n \n def immutable_copy(self) -> ImmutableState:\n \"\"\"Return a shallow copy of the state object, setting it to be frozen.\n@@ -303,4 +310,4 @@\n Returns:\n A ``State``\n \"\"\"\n- return ImmutableState(self)\n+ return ImmutableState(self, deep_copy=self._deep_copy)\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). 
But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom copy import copy, deepcopy\nfrom threading import RLock\nfrom typing import Any, Callable, Generator, Iterable, Iterator, Mapping, MutableMapping\n\n__all__ = (\"ImmutableState\", \"State\")\n\n\nclass ImmutableState(Mapping[str, Any]):\n \"\"\"An object meant to store arbitrary state.\n\n It can be accessed using dot notation while exposing dict like functionalities.\n \"\"\"\n\n __slots__ = (\"_state\",)\n\n _state: dict[str, Any]\n\n def __init__(\n self, state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]], deep_copy: bool = True\n ) -> None:\n \"\"\"Initialize an ``ImmutableState`` instance.\n\n Args:\n state: An object to initialize the state from. Can be a dict, an instance of :class:`ImmutableState`, or a tuple\n of key value paris.\n deep_copy: Whether to 'deepcopy' the passed in state.\n\n Examples:\n .. code-block: python\n\n from litestar.datastructures import ImmutableState\n\n state_dict = {\"first\": 1, \"second\": 2, \"third\": 3, \"fourth\": 4}\n state = ImmutableState(state_dict)\n\n # state implements the Mapping type:\n assert len(state) == 3\n assert \"first\" in state\n assert not \"fourth\" in state\n assert state[\"first\"] == 1\n assert [(k, v) for k, v in state.items()] == [(\"first\", 1), (\"second\", 2), (\"third\", 3)]\n\n # state implements __bool__\n assert state # state is true when it has values.\n assert not State() # state is empty when it has no values.\n\n # it has a 'dict' method to retrieve a shallow copy of the underlying dict\n inner_dict = state.dict()\n assert inner_dict == state_dict\n\n # you can also retrieve a mutable State by calling 'mutable_copy'\n mutable_state = state.mutable_copy()\n del state[\"first\"]\n assert \"first\" not in state\n\n \"\"\"\n\n if isinstance(state, ImmutableState):\n state = state._state\n\n if not isinstance(state, dict) and isinstance(state, Iterable):\n state = dict(state)\n\n super().__setattr__(\"_state\", deepcopy(state) if deep_copy else state)\n\n def __bool__(self) -> bool:\n \"\"\"Return a boolean indicating whether the wrapped dict instance has values.\"\"\"\n return bool(self._state)\n\n def __getitem__(self, key: str) -> Any:\n \"\"\"Get the value for the corresponding key from the wrapped state object using subscription notation.\n\n Args:\n key: Key to access.\n\n Raises:\n KeyError\n\n Returns:\n A value from the wrapped state instance.\n \"\"\"\n return self._state[key]\n\n def __iter__(self) -> Iterator[str]:\n \"\"\"Return an iterator iterating the wrapped state dict.\n\n Returns:\n An iterator of strings\n \"\"\"\n return iter(self._state)\n\n def __len__(self) -> int:\n \"\"\"Return length of the wrapped state dict.\n\n Returns:\n An integer\n \"\"\"\n return len(self._state)\n\n def __getattr__(self, key: str) -> Any:\n \"\"\"Get the value for the corresponding key from the wrapped state object using attribute notation.\n\n Args:\n key: Key to retrieve\n\n Raises:\n AttributeError: if the given attribute is not set.\n\n Returns:\n The retrieved value\n \"\"\"\n try:\n 
return self._state[key]\n except KeyError as e:\n raise AttributeError from e\n\n def __copy__(self) -> ImmutableState:\n \"\"\"Return a shallow copy of the given state object.\n\n Customizes how the builtin \"copy\" function will work.\n \"\"\"\n return self.__class__(deepcopy(self._state))\n\n def mutable_copy(self) -> State:\n \"\"\"Return a mutable copy of the state object.\n\n Returns:\n A ``State``\n \"\"\"\n return State(self._state)\n\n def dict(self) -> dict[str, Any]:\n \"\"\"Return a shallow copy of the wrapped dict.\n\n Returns:\n A dict\n \"\"\"\n return copy(self._state)\n\n @classmethod\n def __get_validators__(\n cls,\n ) -> Generator[Callable[[ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]], ImmutableState], None, None]: # type: ignore[valid-type]\n \"\"\"Pydantic compatible method to allow custom parsing of state instances in a SignatureModel.\"\"\"\n yield cls.validate\n\n @classmethod\n def validate(cls, value: ImmutableState | dict[str, Any] | Iterable[tuple[str, Any]]) -> ImmutableState: # type: ignore[valid-type]\n \"\"\"Parse a value and instantiate state inside a SignatureModel. This allows us to use custom subclasses of\n state, as well as allows users to decide whether state is mutable or immutable.\n\n Args:\n value: The value from which to initialize the state instance.\n\n Returns:\n An ImmutableState instance\n \"\"\"\n return cls(value)\n\n\nclass State(ImmutableState, MutableMapping[str, Any]):\n \"\"\"An object meant to store arbitrary state.\n\n It can be accessed using dot notation while exposing dict like functionalities.\n \"\"\"\n\n __slots__ = (\"_lock\",)\n\n _lock: RLock\n\n def __init__(\n self,\n state: ImmutableState | Mapping[str, Any] | Iterable[tuple[str, Any]] | None = None,\n deep_copy: bool = False,\n ) -> None:\n \"\"\"Initialize a ``State`` instance with an optional value.\n\n Args:\n state: An object to initialize the state from. Can be a dict, an instance of 'ImmutableState', or a tuple of key value paris.\n deep_copy: Whether to 'deepcopy' the passed in state.\n\n Examples:\n .. code-block: python\n\n from litestar.datastructures import State\n\n state_dict = {\"first\": 1, \"second\": 2, \"third\": 3, \"fourth\": 4}\n state = State(state_dict)\n\n # state can be accessed using '.' 
notation\n assert state.fourth == 4\n del state.fourth\n\n # state implements the Mapping type:\n assert len(state) == 3\n assert \"first\" in state\n assert not \"fourth\" in state\n assert state[\"first\"] == 1\n assert [(k, v) for k, v in state.items()] == [(\"first\", 1), (\"second\", 2), (\"third\", 3)]\n\n state[\"fourth\"] = 4\n assert \"fourth\" in state\n del state[\"fourth\"]\n\n # state implements __bool__\n assert state # state is true when it has values.\n assert not State() # state is empty when it has no values.\n\n # it has shallow copy\n copied_state = state.copy()\n del copied_state.first\n assert state.first\n\n # it has a 'dict' method to retrieve a shallow copy of the underlying dict\n inner_dict = state.dict()\n assert inner_dict == state_dict\n\n # you can get an immutable copy of the state by calling 'immutable_immutable_copy'\n immutable_copy = state.immutable_copy()\n del immutable_copy.first # raises AttributeError\n\n \"\"\"\n\n super().__init__(state if state is not None else {}, deep_copy=deep_copy)\n super().__setattr__(\"_lock\", RLock())\n\n def __delitem__(self, key: str) -> None:\n \"\"\"Delete the value from the key from the wrapped state object using subscription notation.\n\n Args:\n key: Key to delete\n\n Raises:\n KeyError: if the given attribute is not set.\n\n Returns:\n None\n \"\"\"\n\n with self._lock:\n del self._state[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n \"\"\"Set an item in the state using subscription notation.\n\n Args:\n key: Key to set.\n value: Value to set.\n\n Returns:\n None\n \"\"\"\n\n with self._lock:\n self._state[key] = value\n\n def __setattr__(self, key: str, value: Any) -> None:\n \"\"\"Set an item in the state using attribute notation.\n\n Args:\n key: Key to set.\n value: Value to set.\n\n Returns:\n None\n \"\"\"\n\n with self._lock:\n self._state[key] = value\n\n def __delattr__(self, key: str) -> None:\n \"\"\"Delete the value from the key from the wrapped state object using attribute notation.\n\n Args:\n key: Key to delete\n\n Raises:\n AttributeError: if the given attribute is not set.\n\n Returns:\n None\n \"\"\"\n\n try:\n with self._lock:\n del self._state[key]\n except KeyError as e:\n raise AttributeError from e\n\n def copy(self) -> State:\n \"\"\"Return a shallow copy of the state object.\n\n Returns:\n A ``State``\n \"\"\"\n return self.__class__(self.dict())\n\n def immutable_copy(self) -> ImmutableState:\n \"\"\"Return a shallow copy of the state object, setting it to be frozen.\n\n Returns:\n A ``State``\n \"\"\"\n return ImmutableState(self)\n", "path": "litestar/datastructures/state.py"}]} | 3,587 | 889 |
gh_patches_debug_3901 | rasdani/github-patches | git_diff | carpentries__amy-646 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
API: don't return todos with unknown start
This breaks the timeline.
</issue>
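As a hedged sketch of what the report implies (the ``due`` field and the manager methods are the ones used in ``api/views.py`` below; the helper name is made up), the timeline feed would simply skip undated items:

```python
# Sketch: drop todo items whose date is unset so the timeline only receives
# entries it can actually place. Mirrors the queryset style used below.
from workshops.models import TodoItem


def current_todos_for(user):
    return (
        TodoItem.objects.user(user)
        .incomplete()
        .exclude(due=None)        # todos with an unknown date are omitted
        .select_related('event')
    )
```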
<code>
[start of api/views.py]
1 import datetime
2
3 from django.db.models import Q
4 from rest_framework.generics import ListAPIView
5 from rest_framework.metadata import SimpleMetadata
6 from rest_framework.permissions import (
7 IsAuthenticatedOrReadOnly, IsAuthenticated
8 )
9 from rest_framework.response import Response
10 from rest_framework.reverse import reverse
11 from rest_framework.views import APIView
12
13 from workshops.models import Badge, Airport, Event, TodoItem, Tag
14 from workshops.util import get_members, default_membership_cutoff
15
16 from .serializers import (
17 PersonNameEmailSerializer,
18 ExportBadgesSerializer,
19 ExportInstructorLocationsSerializer,
20 EventSerializer,
21 TodoSerializer,
22 )
23
24
25 class QueryMetadata(SimpleMetadata):
26 """Additionally include info about query parameters."""
27
28 def determine_metadata(self, request, view):
29 data = super().determine_metadata(request, view)
30
31 try:
32 data['query_params'] = view.get_query_params_description()
33 except AttributeError:
34 pass
35
36 return data
37
38
39 class ApiRoot(APIView):
40 def get(self, request, format=None):
41 return Response({
42 'export-badges': reverse('api:export-badges', request=request,
43 format=format),
44 'export-instructors': reverse('api:export-instructors',
45 request=request, format=format),
46 'export-members': reverse('api:export-members', request=request,
47 format=format),
48 'events-published': reverse('api:events-published',
49 request=request, format=format),
50 'user-todos': reverse('api:user-todos',
51 request=request, format=format),
52 })
53
54
55 class ExportBadgesView(ListAPIView):
56 """List all badges and people who have them."""
57 permission_classes = (IsAuthenticatedOrReadOnly, )
58 paginator = None # disable pagination
59
60 queryset = Badge.objects.prefetch_related('person_set')
61 serializer_class = ExportBadgesSerializer
62
63
64 class ExportInstructorLocationsView(ListAPIView):
65 """List all airports and instructors located near them."""
66 permission_classes = (IsAuthenticatedOrReadOnly, )
67 paginator = None # disable pagination
68
69 queryset = Airport.objects.exclude(person=None) \
70 .prefetch_related('person_set')
71 serializer_class = ExportInstructorLocationsSerializer
72
73
74 class ExportMembersView(ListAPIView):
75 """Show everyone who qualifies as an SCF member."""
76 permission_classes = (IsAuthenticatedOrReadOnly, )
77 paginator = None # disable pagination
78
79 serializer_class = PersonNameEmailSerializer
80
81 def get_queryset(self):
82 earliest_default, latest_default = default_membership_cutoff()
83
84 earliest = self.request.query_params.get('earliest', None)
85 if earliest is not None:
86 try:
87 earliest = datetime.datetime.strptime(earliest, '%Y-%m-%d') \
88 .date()
89 except ValueError:
90 earliest = earliest_default
91 else:
92 earliest = earliest_default
93
94 latest = self.request.query_params.get('latest', None)
95 if latest is not None:
96 try:
97 latest = datetime.datetime.strptime(latest, '%Y-%m-%d').date()
98 except ValueError:
99 latest = latest_default
100 else:
101 latest = latest_default
102
103 return get_members(earliest, latest)
104
105 def get_query_params_description(self):
106 return {
107 'earliest': 'Date of earliest workshop someone taught at.'
108 ' Defaults to -2*365 days from current date.',
109 'latest': 'Date of latest workshop someone taught at.'
110 ' Defaults to current date.',
111 }
112
113
114 class PublishedEvents(ListAPIView):
115 """List published events."""
116
117 # only events that have both a starting date and a URL
118 permission_classes = (IsAuthenticatedOrReadOnly, )
119 paginator = None # disable pagination
120
121 serializer_class = EventSerializer
122
123 metadata_class = QueryMetadata
124
125 def get_queryset(self):
126 """Optionally restrict the returned event set to events hosted by
127 specific host or administered by specific admin."""
128 queryset = Event.objects.published_events()
129
130 administrator = self.request.query_params.get('administrator', None)
131 if administrator is not None:
132 queryset = queryset.filter(administrator__pk=administrator)
133
134 host = self.request.query_params.get('host', None)
135 if host is not None:
136 queryset = queryset.filter(host__pk=host)
137
138 tags = self.request.query_params.getlist('tag', None)
139 if tags:
140 tags = Tag.objects.filter(name__in=tags)
141 for tag in tags:
142 queryset = queryset.filter(tags=tag)
143
144 return queryset
145
146 def get_query_params_description(self):
147 return {
148 'administrator': 'ID of the organization responsible for admin '
149 'work on events.',
150 'host': 'ID of the organization hosting the event.',
151 'tag': "Events' tag(s). You can use this parameter multiple "
152 "times.",
153 }
154
155
156 class UserTodoItems(ListAPIView):
157 permission_classes = (IsAuthenticated, )
158 paginator = None
159 serializer_class = TodoSerializer
160
161 def get_queryset(self):
162 """Return current TODOs for currently logged in user."""
163 return TodoItem.objects.user(self.request.user) \
164 .incomplete() \
165 .select_related('event')
166
[end of api/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/api/views.py b/api/views.py
--- a/api/views.py
+++ b/api/views.py
@@ -162,4 +162,5 @@
"""Return current TODOs for currently logged in user."""
return TodoItem.objects.user(self.request.user) \
.incomplete() \
+ .exclude(due=None) \
.select_related('event')
| {"golden_diff": "diff --git a/api/views.py b/api/views.py\n--- a/api/views.py\n+++ b/api/views.py\n@@ -162,4 +162,5 @@\n \"\"\"Return current TODOs for currently logged in user.\"\"\"\n return TodoItem.objects.user(self.request.user) \\\n .incomplete() \\\n+ .exclude(due=None) \\\n .select_related('event')\n", "issue": "API: don't return todos with unknown start\nThis breaks the timeline.\n\n", "before_files": [{"content": "import datetime\n\nfrom django.db.models import Q\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.metadata import SimpleMetadata\nfrom rest_framework.permissions import (\n IsAuthenticatedOrReadOnly, IsAuthenticated\n)\nfrom rest_framework.response import Response\nfrom rest_framework.reverse import reverse\nfrom rest_framework.views import APIView\n\nfrom workshops.models import Badge, Airport, Event, TodoItem, Tag\nfrom workshops.util import get_members, default_membership_cutoff\n\nfrom .serializers import (\n PersonNameEmailSerializer,\n ExportBadgesSerializer,\n ExportInstructorLocationsSerializer,\n EventSerializer,\n TodoSerializer,\n)\n\n\nclass QueryMetadata(SimpleMetadata):\n \"\"\"Additionally include info about query parameters.\"\"\"\n\n def determine_metadata(self, request, view):\n data = super().determine_metadata(request, view)\n\n try:\n data['query_params'] = view.get_query_params_description()\n except AttributeError:\n pass\n\n return data\n\n\nclass ApiRoot(APIView):\n def get(self, request, format=None):\n return Response({\n 'export-badges': reverse('api:export-badges', request=request,\n format=format),\n 'export-instructors': reverse('api:export-instructors',\n request=request, format=format),\n 'export-members': reverse('api:export-members', request=request,\n format=format),\n 'events-published': reverse('api:events-published',\n request=request, format=format),\n 'user-todos': reverse('api:user-todos',\n request=request, format=format),\n })\n\n\nclass ExportBadgesView(ListAPIView):\n \"\"\"List all badges and people who have them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Badge.objects.prefetch_related('person_set')\n serializer_class = ExportBadgesSerializer\n\n\nclass ExportInstructorLocationsView(ListAPIView):\n \"\"\"List all airports and instructors located near them.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n queryset = Airport.objects.exclude(person=None) \\\n .prefetch_related('person_set')\n serializer_class = ExportInstructorLocationsSerializer\n\n\nclass ExportMembersView(ListAPIView):\n \"\"\"Show everyone who qualifies as an SCF member.\"\"\"\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = PersonNameEmailSerializer\n\n def get_queryset(self):\n earliest_default, latest_default = default_membership_cutoff()\n\n earliest = self.request.query_params.get('earliest', None)\n if earliest is not None:\n try:\n earliest = datetime.datetime.strptime(earliest, '%Y-%m-%d') \\\n .date()\n except ValueError:\n earliest = earliest_default\n else:\n earliest = earliest_default\n\n latest = self.request.query_params.get('latest', None)\n if latest is not None:\n try:\n latest = datetime.datetime.strptime(latest, '%Y-%m-%d').date()\n except ValueError:\n latest = latest_default\n else:\n latest = latest_default\n\n return get_members(earliest, latest)\n\n def get_query_params_description(self):\n return {\n 'earliest': 'Date 
of earliest workshop someone taught at.'\n ' Defaults to -2*365 days from current date.',\n 'latest': 'Date of latest workshop someone taught at.'\n ' Defaults to current date.',\n }\n\n\nclass PublishedEvents(ListAPIView):\n \"\"\"List published events.\"\"\"\n\n # only events that have both a starting date and a URL\n permission_classes = (IsAuthenticatedOrReadOnly, )\n paginator = None # disable pagination\n\n serializer_class = EventSerializer\n\n metadata_class = QueryMetadata\n\n def get_queryset(self):\n \"\"\"Optionally restrict the returned event set to events hosted by\n specific host or administered by specific admin.\"\"\"\n queryset = Event.objects.published_events()\n\n administrator = self.request.query_params.get('administrator', None)\n if administrator is not None:\n queryset = queryset.filter(administrator__pk=administrator)\n\n host = self.request.query_params.get('host', None)\n if host is not None:\n queryset = queryset.filter(host__pk=host)\n\n tags = self.request.query_params.getlist('tag', None)\n if tags:\n tags = Tag.objects.filter(name__in=tags)\n for tag in tags:\n queryset = queryset.filter(tags=tag)\n\n return queryset\n\n def get_query_params_description(self):\n return {\n 'administrator': 'ID of the organization responsible for admin '\n 'work on events.',\n 'host': 'ID of the organization hosting the event.',\n 'tag': \"Events' tag(s). You can use this parameter multiple \"\n \"times.\",\n }\n\n\nclass UserTodoItems(ListAPIView):\n permission_classes = (IsAuthenticated, )\n paginator = None\n serializer_class = TodoSerializer\n\n def get_queryset(self):\n \"\"\"Return current TODOs for currently logged in user.\"\"\"\n return TodoItem.objects.user(self.request.user) \\\n .incomplete() \\\n .select_related('event')\n", "path": "api/views.py"}]} | 2,028 | 83 |
gh_patches_debug_9431 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1272 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
`pwndbg.gdblib.symbol.address` fails to handle thread-local variables
<img width="1289" alt="截圖 2022-10-11 下午4 46 56" src="https://user-images.githubusercontent.com/61896187/195043471-185d98a1-0df4-43af-ba68-54f8e0853e31.png">
`pwndbg.gdblib.symbol.address('tcache')` raises an error instead of returning `None`.
It seems to be because the error starts with `Cannot find thread-local storage` instead of `Cannot find thread-local variables`, so the following line fails to skip it:
https://github.com/pwndbg/pwndbg/blob/478a569cb3bef11f7011cd0a2f374d53ce5997fd/pwndbg/gdblib/symbol.py#L214-L217
cc: @gsingh93
</issue>
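To make the mismatch concrete, a small illustration (the error text is abbreviated; only the substring check matters here):

```python
# Abbreviated error text from the report vs. the phrases currently being skipped.
error_text = "Cannot find thread-local storage ..."
skipped_exceptions = ["No frame selected", "Cannot find thread-local variables"]

# The existing check finds no match, so the exception is re-raised:
print(all(x not in error_text for x in skipped_exceptions))  # True -> raise

# Matching on the shared prefix covers both wordings:
print("Cannot find thread-local" in error_text)  # True -> skip and return None
```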
<code>
[start of pwndbg/gdblib/symbol.py]
1 """
2 Looking up addresses for function names / symbols, and
3 vice-versa.
4
5 Uses IDA when available if there isn't sufficient symbol
6 information available.
7 """
8 import os
9 import re
10
11 import elftools.common.exceptions
12 import elftools.elf.constants
13 import elftools.elf.elffile
14 import elftools.elf.segments
15 import gdb
16
17 import pwndbg.gdblib.android
18 import pwndbg.gdblib.arch
19 import pwndbg.gdblib.elf
20 import pwndbg.gdblib.events
21 import pwndbg.gdblib.file
22 import pwndbg.gdblib.memory
23 import pwndbg.gdblib.qemu
24 import pwndbg.gdblib.remote
25 import pwndbg.gdblib.stack
26 import pwndbg.ida
27 import pwndbg.lib.memoize
28 import pwndbg.vmmap
29
30
31 def _get_debug_file_directory():
32 """
33 Retrieve the debug file directory path.
34
35 The debug file directory path ('show debug-file-directory') is a comma-
36 separated list of directories which GDB will look in to find the binaries
37 currently loaded.
38 """
39 result = gdb.execute("show debug-file-directory", to_string=True, from_tty=False)
40 expr = r'The directory where separate debug symbols are searched for is "(.*)".\n'
41
42 match = re.search(expr, result)
43
44 if match:
45 return match.group(1)
46 return ""
47
48
49 def _set_debug_file_directory(d):
50 gdb.execute("set debug-file-directory %s" % d, to_string=True, from_tty=False)
51
52
53 def _add_debug_file_directory(d):
54 current = _get_debug_file_directory()
55 if current:
56 _set_debug_file_directory("%s:%s" % (current, d))
57 else:
58 _set_debug_file_directory(d)
59
60
61 if "/usr/lib/debug" not in _get_debug_file_directory():
62 _add_debug_file_directory("/usr/lib/debug")
63
64
65 _remote_files = {}
66
67
68 @pwndbg.gdblib.events.exit
69 def _reset_remote_files():
70 global _remote_files
71 _remote_files = {}
72
73
74 @pwndbg.gdblib.events.new_objfile
75 def _autofetch():
76 """ """
77 if not pwndbg.gdblib.remote.is_remote():
78 return
79
80 if pwndbg.gdblib.qemu.is_qemu_usermode():
81 return
82
83 if pwndbg.gdblib.android.is_android():
84 return
85
86 remote_files_dir = pwndbg.gdblib.file.remote_files_dir()
87 if remote_files_dir not in _get_debug_file_directory().split(":"):
88 _add_debug_file_directory(remote_files_dir)
89
90 for mapping in pwndbg.vmmap.get():
91 objfile = mapping.objfile
92
93 # Don't attempt to download things like '[stack]' and '[heap]'
94 if not objfile.startswith("/"):
95 continue
96
97 # Don't re-download things that we have already downloaded
98 if not objfile or objfile in _remote_files:
99 continue
100
101 msg = "Downloading %r from the remote server" % objfile
102 print(msg, end="")
103
104 try:
105 data = pwndbg.gdblib.file.get(objfile)
106 print("\r" + msg + ": OK")
107 except OSError:
108 # The file could not be downloaded :(
109 print("\r" + msg + ": Failed")
110 return
111
112 filename = os.path.basename(objfile)
113 local_path = os.path.join(remote_files_dir, filename)
114
115 with open(local_path, "wb+") as f:
116 f.write(data)
117
118 _remote_files[objfile] = local_path
119
120 base = None
121 for mapping in pwndbg.vmmap.get():
122 if mapping.objfile != objfile:
123 continue
124
125 if base is None or mapping.vaddr < base.vaddr:
126 base = mapping
127
128 if not base:
129 continue
130
131 base = base.vaddr
132
133 try:
134 elf = elftools.elf.elffile.ELFFile(open(local_path, "rb"))
135 except elftools.common.exceptions.ELFError:
136 continue
137
138 gdb_command = ["add-symbol-file", local_path, hex(int(base))]
139 for section in elf.iter_sections():
140 name = section.name # .decode('latin-1')
141 section = section.header
142 if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:
143 continue
144 gdb_command += ["-s", name, hex(int(base + section.sh_addr))]
145
146 print(" ".join(gdb_command))
147 # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)
148
149
150 @pwndbg.lib.memoize.reset_on_objfile
151 def get(address: int, gdb_only=False) -> str:
152 """
153 Retrieve the name for the symbol located at `address`
154 """
155 # Fast path
156 if address < pwndbg.gdblib.memory.MMAP_MIN_ADDR or address >= ((1 << 64) - 1):
157 return ""
158
159 # Don't look up stack addresses
160 if pwndbg.gdblib.stack.find(address):
161 return ""
162
163 # This sucks, but there's not a GDB API for this.
164 result = gdb.execute("info symbol %#x" % int(address), to_string=True, from_tty=False)
165
166 if not gdb_only and result.startswith("No symbol"):
167 address = int(address)
168 exe = pwndbg.gdblib.elf.exe()
169 if exe:
170 exe_map = pwndbg.vmmap.find(exe.address)
171 if exe_map and address in exe_map:
172 res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)
173 return res or ""
174
175 # Expected format looks like this:
176 # main in section .text of /bin/bash
177 # main + 3 in section .text of /bin/bash
178 # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6
179 # No symbol matches system-1.
180 a, b, c, _ = result.split(None, 3)
181
182 if b == "+":
183 return "%s+%s" % (a, c)
184 if b == "in":
185 return a
186
187 return ""
188
189
190 @pwndbg.lib.memoize.reset_on_objfile
191 def address(symbol: str) -> int:
192 """
193 Get the address for `symbol`
194 """
195 try:
196 symbol_obj = gdb.lookup_symbol(symbol)[0]
197 if symbol_obj:
198 return int(symbol_obj.value().address)
199 except gdb.error as e:
200 # Symbol lookup only throws exceptions on errors, not if it failed to
201 # lookup a symbol. We want to raise these errors so we can handle them
202 # properly, but there are some we haven't figured out how to fix yet, so
203 # we ignore those here
204 skipped_exceptions = []
205
206 # This is exception is being thrown by the Go typeinfo tests, we should
207 # investigate why this is happening and see if we can explicitly check
208 # for it with `gdb.selected_frame()`
209 skipped_exceptions.append("No frame selected")
210
211 # If we try to look up a TLS variable when there is no TLS, this
212 # exception occurs. Ideally we should come up with a way to check for
213 # this case before calling `gdb.lookup_symbol`
214 skipped_exceptions.append("Cannot find thread-local variables")
215
216 if all(x not in str(e) for x in skipped_exceptions):
217 raise e
218
219 try:
220 # TODO: We should properly check if we have a connection to the IDA server first
221 address = pwndbg.ida.LocByName(symbol)
222 if address:
223 return address
224 except Exception:
225 pass
226
227
228 @pwndbg.lib.memoize.reset_on_objfile
229 def static_linkage_symbol_address(symbol):
230 if isinstance(symbol, int):
231 return symbol
232
233 try:
234 return int(symbol, 0)
235 except Exception:
236 pass
237
238 try:
239 symbol_obj = gdb.lookup_static_symbol(symbol)
240 return int(symbol_obj.value().address) if symbol_obj else None
241 except gdb.error:
242 return None
243
244
245 @pwndbg.gdblib.events.stop
246 @pwndbg.lib.memoize.reset_on_start
247 def _add_main_exe_to_symbols():
248 if not pwndbg.gdblib.remote.is_remote():
249 return
250
251 if pwndbg.gdblib.android.is_android():
252 return
253
254 exe = pwndbg.gdblib.elf.exe()
255
256 if not exe:
257 return
258
259 addr = exe.address
260
261 if not addr:
262 return
263
264 addr = int(addr)
265
266 mmap = pwndbg.vmmap.find(addr)
267 if not mmap:
268 return
269
270 path = mmap.objfile
271 if path and (pwndbg.gdblib.arch.endian == pwndbg.gdblib.arch.native_endian):
272 try:
273 gdb.execute("add-symbol-file %s" % (path,), from_tty=False, to_string=True)
274 except gdb.error:
275 pass
276
277
278 @pwndbg.lib.memoize.reset_on_stop
279 @pwndbg.lib.memoize.reset_on_start
280 def selected_frame_source_absolute_filename():
281 """
282 Retrieve the symbol table’s source absolute file name from the selected frame.
283
284 In case of missing symbol table or frame information, None is returned.
285 """
286 try:
287 frame = gdb.selected_frame()
288 except gdb.error:
289 return None
290
291 if not frame:
292 return None
293
294 sal = frame.find_sal()
295 if not sal:
296 return None
297
298 symtab = sal.symtab
299 if not symtab:
300 return None
301
302 return symtab.fullname()
303
[end of pwndbg/gdblib/symbol.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pwndbg/gdblib/symbol.py b/pwndbg/gdblib/symbol.py
--- a/pwndbg/gdblib/symbol.py
+++ b/pwndbg/gdblib/symbol.py
@@ -211,7 +211,7 @@
# If we try to look up a TLS variable when there is no TLS, this
# exception occurs. Ideally we should come up with a way to check for
# this case before calling `gdb.lookup_symbol`
- skipped_exceptions.append("Cannot find thread-local variables")
+ skipped_exceptions.append("Cannot find thread-local")
if all(x not in str(e) for x in skipped_exceptions):
raise e
| {"golden_diff": "diff --git a/pwndbg/gdblib/symbol.py b/pwndbg/gdblib/symbol.py\n--- a/pwndbg/gdblib/symbol.py\n+++ b/pwndbg/gdblib/symbol.py\n@@ -211,7 +211,7 @@\n # If we try to look up a TLS variable when there is no TLS, this\n # exception occurs. Ideally we should come up with a way to check for\n # this case before calling `gdb.lookup_symbol`\n- skipped_exceptions.append(\"Cannot find thread-local variables\")\n+ skipped_exceptions.append(\"Cannot find thread-local\")\n \n if all(x not in str(e) for x in skipped_exceptions):\n raise e\n", "issue": "`pwndbg.gdblib.symbol.address` fail to handle the thread-local variables\n<img width=\"1289\" alt=\"\u622a\u5716 2022-10-11 \u4e0b\u53484 46 56\" src=\"https://user-images.githubusercontent.com/61896187/195043471-185d98a1-0df4-43af-ba68-54f8e0853e31.png\">\r\n\r\n`pwndbg.gdblib.symbol.address('tcache')` will get error instead of return `None`.\r\n\r\nSeems like it's because the error starts with `Cannot find thread-local storage` instead of `Cannot find thread-local variables`, so the following line fails to skip it:\r\n\r\nhttps://github.com/pwndbg/pwndbg/blob/478a569cb3bef11f7011cd0a2f374d53ce5997fd/pwndbg/gdblib/symbol.py#L214-L217\r\n\r\ncc: @gsingh93 \n", "before_files": [{"content": "\"\"\"\nLooking up addresses for function names / symbols, and\nvice-versa.\n\nUses IDA when available if there isn't sufficient symbol\ninformation available.\n\"\"\"\nimport os\nimport re\n\nimport elftools.common.exceptions\nimport elftools.elf.constants\nimport elftools.elf.elffile\nimport elftools.elf.segments\nimport gdb\n\nimport pwndbg.gdblib.android\nimport pwndbg.gdblib.arch\nimport pwndbg.gdblib.elf\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.file\nimport pwndbg.gdblib.memory\nimport pwndbg.gdblib.qemu\nimport pwndbg.gdblib.remote\nimport pwndbg.gdblib.stack\nimport pwndbg.ida\nimport pwndbg.lib.memoize\nimport pwndbg.vmmap\n\n\ndef _get_debug_file_directory():\n \"\"\"\n Retrieve the debug file directory path.\n\n The debug file directory path ('show debug-file-directory') is a comma-\n separated list of directories which GDB will look in to find the binaries\n currently loaded.\n \"\"\"\n result = gdb.execute(\"show debug-file-directory\", to_string=True, from_tty=False)\n expr = r'The directory where separate debug symbols are searched for is \"(.*)\".\\n'\n\n match = re.search(expr, result)\n\n if match:\n return match.group(1)\n return \"\"\n\n\ndef _set_debug_file_directory(d):\n gdb.execute(\"set debug-file-directory %s\" % d, to_string=True, from_tty=False)\n\n\ndef _add_debug_file_directory(d):\n current = _get_debug_file_directory()\n if current:\n _set_debug_file_directory(\"%s:%s\" % (current, d))\n else:\n _set_debug_file_directory(d)\n\n\nif \"/usr/lib/debug\" not in _get_debug_file_directory():\n _add_debug_file_directory(\"/usr/lib/debug\")\n\n\n_remote_files = {}\n\n\[email protected]\ndef _reset_remote_files():\n global _remote_files\n _remote_files = {}\n\n\[email protected]_objfile\ndef _autofetch():\n \"\"\" \"\"\"\n if not pwndbg.gdblib.remote.is_remote():\n return\n\n if pwndbg.gdblib.qemu.is_qemu_usermode():\n return\n\n if pwndbg.gdblib.android.is_android():\n return\n\n remote_files_dir = pwndbg.gdblib.file.remote_files_dir()\n if remote_files_dir not in _get_debug_file_directory().split(\":\"):\n _add_debug_file_directory(remote_files_dir)\n\n for mapping in pwndbg.vmmap.get():\n objfile = mapping.objfile\n\n # Don't attempt to download things like '[stack]' and '[heap]'\n if not objfile.startswith(\"/\"):\n 
continue\n\n # Don't re-download things that we have already downloaded\n if not objfile or objfile in _remote_files:\n continue\n\n msg = \"Downloading %r from the remote server\" % objfile\n print(msg, end=\"\")\n\n try:\n data = pwndbg.gdblib.file.get(objfile)\n print(\"\\r\" + msg + \": OK\")\n except OSError:\n # The file could not be downloaded :(\n print(\"\\r\" + msg + \": Failed\")\n return\n\n filename = os.path.basename(objfile)\n local_path = os.path.join(remote_files_dir, filename)\n\n with open(local_path, \"wb+\") as f:\n f.write(data)\n\n _remote_files[objfile] = local_path\n\n base = None\n for mapping in pwndbg.vmmap.get():\n if mapping.objfile != objfile:\n continue\n\n if base is None or mapping.vaddr < base.vaddr:\n base = mapping\n\n if not base:\n continue\n\n base = base.vaddr\n\n try:\n elf = elftools.elf.elffile.ELFFile(open(local_path, \"rb\"))\n except elftools.common.exceptions.ELFError:\n continue\n\n gdb_command = [\"add-symbol-file\", local_path, hex(int(base))]\n for section in elf.iter_sections():\n name = section.name # .decode('latin-1')\n section = section.header\n if not section.sh_flags & elftools.elf.constants.SH_FLAGS.SHF_ALLOC:\n continue\n gdb_command += [\"-s\", name, hex(int(base + section.sh_addr))]\n\n print(\" \".join(gdb_command))\n # gdb.execute(' '.join(gdb_command), from_tty=False, to_string=True)\n\n\[email protected]_on_objfile\ndef get(address: int, gdb_only=False) -> str:\n \"\"\"\n Retrieve the name for the symbol located at `address`\n \"\"\"\n # Fast path\n if address < pwndbg.gdblib.memory.MMAP_MIN_ADDR or address >= ((1 << 64) - 1):\n return \"\"\n\n # Don't look up stack addresses\n if pwndbg.gdblib.stack.find(address):\n return \"\"\n\n # This sucks, but there's not a GDB API for this.\n result = gdb.execute(\"info symbol %#x\" % int(address), to_string=True, from_tty=False)\n\n if not gdb_only and result.startswith(\"No symbol\"):\n address = int(address)\n exe = pwndbg.gdblib.elf.exe()\n if exe:\n exe_map = pwndbg.vmmap.find(exe.address)\n if exe_map and address in exe_map:\n res = pwndbg.ida.Name(address) or pwndbg.ida.GetFuncOffset(address)\n return res or \"\"\n\n # Expected format looks like this:\n # main in section .text of /bin/bash\n # main + 3 in section .text of /bin/bash\n # system + 1 in section .text of /lib/x86_64-linux-gnu/libc.so.6\n # No symbol matches system-1.\n a, b, c, _ = result.split(None, 3)\n\n if b == \"+\":\n return \"%s+%s\" % (a, c)\n if b == \"in\":\n return a\n\n return \"\"\n\n\[email protected]_on_objfile\ndef address(symbol: str) -> int:\n \"\"\"\n Get the address for `symbol`\n \"\"\"\n try:\n symbol_obj = gdb.lookup_symbol(symbol)[0]\n if symbol_obj:\n return int(symbol_obj.value().address)\n except gdb.error as e:\n # Symbol lookup only throws exceptions on errors, not if it failed to\n # lookup a symbol. We want to raise these errors so we can handle them\n # properly, but there are some we haven't figured out how to fix yet, so\n # we ignore those here\n skipped_exceptions = []\n\n # This is exception is being thrown by the Go typeinfo tests, we should\n # investigate why this is happening and see if we can explicitly check\n # for it with `gdb.selected_frame()`\n skipped_exceptions.append(\"No frame selected\")\n\n # If we try to look up a TLS variable when there is no TLS, this\n # exception occurs. 
Ideally we should come up with a way to check for\n # this case before calling `gdb.lookup_symbol`\n skipped_exceptions.append(\"Cannot find thread-local variables\")\n\n if all(x not in str(e) for x in skipped_exceptions):\n raise e\n\n try:\n # TODO: We should properly check if we have a connection to the IDA server first\n address = pwndbg.ida.LocByName(symbol)\n if address:\n return address\n except Exception:\n pass\n\n\[email protected]_on_objfile\ndef static_linkage_symbol_address(symbol):\n if isinstance(symbol, int):\n return symbol\n\n try:\n return int(symbol, 0)\n except Exception:\n pass\n\n try:\n symbol_obj = gdb.lookup_static_symbol(symbol)\n return int(symbol_obj.value().address) if symbol_obj else None\n except gdb.error:\n return None\n\n\[email protected]\[email protected]_on_start\ndef _add_main_exe_to_symbols():\n if not pwndbg.gdblib.remote.is_remote():\n return\n\n if pwndbg.gdblib.android.is_android():\n return\n\n exe = pwndbg.gdblib.elf.exe()\n\n if not exe:\n return\n\n addr = exe.address\n\n if not addr:\n return\n\n addr = int(addr)\n\n mmap = pwndbg.vmmap.find(addr)\n if not mmap:\n return\n\n path = mmap.objfile\n if path and (pwndbg.gdblib.arch.endian == pwndbg.gdblib.arch.native_endian):\n try:\n gdb.execute(\"add-symbol-file %s\" % (path,), from_tty=False, to_string=True)\n except gdb.error:\n pass\n\n\[email protected]_on_stop\[email protected]_on_start\ndef selected_frame_source_absolute_filename():\n \"\"\"\n Retrieve the symbol table\u2019s source absolute file name from the selected frame.\n\n In case of missing symbol table or frame information, None is returned.\n \"\"\"\n try:\n frame = gdb.selected_frame()\n except gdb.error:\n return None\n\n if not frame:\n return None\n\n sal = frame.find_sal()\n if not sal:\n return None\n\n symtab = sal.symtab\n if not symtab:\n return None\n\n return symtab.fullname()\n", "path": "pwndbg/gdblib/symbol.py"}]} | 3,692 | 153 |
gh_patches_debug_3378 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-2055 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
WandbLogger warning not logging logs.
<!--
### Common bugs:
1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79).
2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq)
-->
## 🐛 Bug
WandbLogger gives the warning `WARNING Adding to old History rows isn't currently supported. Step 25 < 38` and stops logging when I use the WandbLogger with k-fold cross-validation: I reuse the same `wandb_logger` instance but call `trainer.fit` multiple times with different train_dl and valid_dl. Since the step gets repeated for each fold, nothing is logged after the 1st fold is complete, even though the log keys are completely different. It was working perfectly with pytorch-lightning v0.7.4. For now, I have to create separate experiments for each fold, which are hard to analyze on wandb.
<!-- A clear and concise description of what the bug is. -->
### To Reproduce
#### Code sample
[Colab Notebook](https://colab.research.google.com/drive/1fg-VVeSB71dWEH1qDvvdXTyfxeU--vsM?usp=sharing)
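In case the notebook link becomes unavailable, here is a minimal sketch of the pattern that triggers the warning; `MyModel` and `folds` are placeholders, not code from the original notebook:

```python
# Sketch: reusing one WandbLogger across k folds (MyModel and folds are placeholders).
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import WandbLogger

wandb_logger = WandbLogger(project="kfold-demo")

for fold, (train_dl, valid_dl) in enumerate(folds):
    model = MyModel()
    trainer = Trainer(logger=wandb_logger, max_epochs=2)
    trainer.fit(model, train_dl, valid_dl)
    # global_step restarts at 0 for every fold, so wandb warns:
    # "Adding to old History rows isn't currently supported. Step 25 < 38"
    # and the metrics from later folds never show up.
```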
<!-- Ideally attach a minimal code sample to reproduce the described issue.
Minimal means having the shortest code but still preserving the bug. -->
### Expected behavior
It should log even when the global_step is repeated, as long as the log keys are different.
<!-- A clear and concise description of what you expected to happen. -->
### Environment
- PyTorch Version (e.g., 1.0): 1.5.0
- OS (e.g., Linux): Linux
- How you installed PyTorch (`conda`, `pip`, source): pip
- Build command you used (if compiling from source):
- Python version: 3.7
- CUDA/cuDNN version: 10.1
- GPU models and configuration: Tesla P100-PCIE-16GB
- Any other relevant information:
### Additional context
<!-- Add any other context about the problem here. -->
</issue>
<code>
[start of pytorch_lightning/loggers/wandb.py]
1 """
2 Weights and Biases
3 ------------------
4 """
5 import os
6 from argparse import Namespace
7 from typing import Optional, List, Dict, Union, Any
8
9 import torch.nn as nn
10
11 try:
12 import wandb
13 from wandb.wandb_run import Run
14 _WANDB_AVAILABLE = True
15 except ImportError: # pragma: no-cover
16 wandb = None
17 Run = None
18 _WANDB_AVAILABLE = False
19
20 from pytorch_lightning.loggers.base import LightningLoggerBase
21 from pytorch_lightning.utilities import rank_zero_only
22
23
24 class WandbLogger(LightningLoggerBase):
25 """
26 Log using `Weights and Biases <https://www.wandb.com/>`_. Install it with pip:
27
28 .. code-block:: bash
29
30 pip install wandb
31
32 Args:
33 name: Display name for the run.
34 save_dir: Path where data is saved.
35 offline: Run offline (data can be streamed later to wandb servers).
36 id: Sets the version, mainly used to resume a previous run.
37 anonymous: Enables or explicitly disables anonymous logging.
38 version: Sets the version, mainly used to resume a previous run.
39 project: The name of the project to which this run will belong.
40 tags: Tags associated with this run.
41 log_model: Save checkpoints in wandb dir to upload on W&B servers.
42 experiment: WandB experiment object
43 entity: The team posting this run (default: your username or your default team)
44 group: A unique string shared by all runs in a given group
45
46 Example:
47 >>> from pytorch_lightning.loggers import WandbLogger
48 >>> from pytorch_lightning import Trainer
49 >>> wandb_logger = WandbLogger()
50 >>> trainer = Trainer(logger=wandb_logger)
51
52 See Also:
53 - `Tutorial <https://app.wandb.ai/cayush/pytorchlightning/reports/
54 Use-Pytorch-Lightning-with-Weights-%26-Biases--Vmlldzo2NjQ1Mw>`__
55 on how to use W&B with Pytorch Lightning.
56
57 """
58
59 def __init__(self,
60 name: Optional[str] = None,
61 save_dir: Optional[str] = None,
62 offline: bool = False,
63 id: Optional[str] = None,
64 anonymous: bool = False,
65 version: Optional[str] = None,
66 project: Optional[str] = None,
67 tags: Optional[List[str]] = None,
68 log_model: bool = False,
69 experiment=None,
70 entity=None,
71 group: Optional[str] = None):
72 if not _WANDB_AVAILABLE:
73 raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover
74 ' install it with `pip install wandb`.')
75 super().__init__()
76 self._name = name
77 self._save_dir = save_dir
78 self._anonymous = 'allow' if anonymous else None
79 self._id = version or id
80 self._tags = tags
81 self._project = project
82 self._experiment = experiment
83 self._offline = offline
84 self._entity = entity
85 self._log_model = log_model
86 self._group = group
87
88 def __getstate__(self):
89 state = self.__dict__.copy()
90 # args needed to reload correct experiment
91 state['_id'] = self._experiment.id if self._experiment is not None else None
92
93 # cannot be pickled
94 state['_experiment'] = None
95 return state
96
97 @property
98 def experiment(self) -> Run:
99 r"""
100
101 Actual wandb object. To use wandb features in your
102 :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
103
104 Example::
105
106 self.logger.experiment.some_wandb_function()
107
108 """
109 if self._experiment is None:
110 if self._offline:
111 os.environ['WANDB_MODE'] = 'dryrun'
112 self._experiment = wandb.init(
113 name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,
114 reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity,
115 group=self._group)
116 # save checkpoints in wandb dir to upload on W&B servers
117 if self._log_model:
118 self.save_dir = self._experiment.dir
119 return self._experiment
120
121 def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):
122 self.experiment.watch(model, log=log, log_freq=log_freq)
123
124 @rank_zero_only
125 def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
126 params = self._convert_params(params)
127 self.experiment.config.update(params, allow_val_change=True)
128
129 @rank_zero_only
130 def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
131 self.experiment.log(metrics, step=step)
132
133 @property
134 def name(self) -> str:
135 # don't create an experiment if we don't have one
136 name = self._experiment.project_name() if self._experiment else None
137 return name
138
139 @property
140 def version(self) -> str:
141 # don't create an experiment if we don't have one
142 return self._experiment.id if self._experiment else None
143
[end of pytorch_lightning/loggers/wandb.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py
--- a/pytorch_lightning/loggers/wandb.py
+++ b/pytorch_lightning/loggers/wandb.py
@@ -128,7 +128,7 @@
@rank_zero_only
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
- self.experiment.log(metrics, step=step)
+ self.experiment.log({'global_step': step, **metrics} if step is not None else metrics)
@property
def name(self) -> str:
| {"golden_diff": "diff --git a/pytorch_lightning/loggers/wandb.py b/pytorch_lightning/loggers/wandb.py\n--- a/pytorch_lightning/loggers/wandb.py\n+++ b/pytorch_lightning/loggers/wandb.py\n@@ -128,7 +128,7 @@\n \n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n- self.experiment.log(metrics, step=step)\n+ self.experiment.log({'global_step': step, **metrics} if step is not None else metrics)\n \n @property\n def name(self) -> str:\n", "issue": "WandbLogger warning not logging logs.\n<!-- \r\n### Common bugs:\r\n1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). \r\n2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) \r\n-->\r\n\r\n## \ud83d\udc1b Bug\r\nWandbLogger giving warning: `WARNING Adding to old History rows isn't currently supported. Step 25 < 38` and not logging when I try to use the WandbLogger with k-fold cross-validation because there I am using the same instance of `wandb_logger` but using `trainer.fit` multiple times for different train_dl and valid_dl. Since the step gets repeated in each case, it's not logging anything after the 1st fold is complete even though the log keys are completely different. It was working perfectly with pytorch-lightning v-0.7.4. For now, I have to create separate experiments for each fold which are hard to analyze on wandb.\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n#### Code sample\r\n[Colab Notebook](https://colab.research.google.com/drive/1fg-VVeSB71dWEH1qDvvdXTyfxeU--vsM?usp=sharing)\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue. \r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\nIt should log even when the global_step is repeated in case if the logs keys are different.\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n - PyTorch Version (e.g., 1.0): 1.5.0\r\n - OS (e.g., Linux): Linux\r\n - How you installed PyTorch (`conda`, `pip`, source): pip\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.7\r\n - CUDA/cuDNN version: 10.1\r\n - GPU models and configuration: Tesla P100-PCIE-16GB\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\nWandbLogger warning not logging logs.\n<!-- \r\n### Common bugs:\r\n1. Tensorboard not showing in Jupyter-notebook see [issue 79](https://github.com/PyTorchLightning/pytorch-lightning/issues/79). \r\n2. PyTorch 1.1.0 vs 1.2.0 support [see FAQ](https://github.com/PyTorchLightning/pytorch-lightning#faq) \r\n-->\r\n\r\n## \ud83d\udc1b Bug\r\nWandbLogger giving warning: `WARNING Adding to old History rows isn't currently supported. Step 25 < 38` and not logging when I try to use the WandbLogger with k-fold cross-validation because there I am using the same instance of `wandb_logger` but using `trainer.fit` multiple times for different train_dl and valid_dl. Since the step gets repeated in each case, it's not logging anything after the 1st fold is complete even though the log keys are completely different. It was working perfectly with pytorch-lightning v-0.7.4. For now, I have to create separate experiments for each fold which are hard to analyze on wandb.\r\n<!-- A clear and concise description of what the bug is. 
-->\r\n\r\n### To Reproduce\r\n#### Code sample\r\n[Colab Notebook](https://colab.research.google.com/drive/1fg-VVeSB71dWEH1qDvvdXTyfxeU--vsM?usp=sharing)\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue. \r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n### Expected behavior\r\nIt should log even when the global_step is repeated in case if the logs keys are different.\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n - PyTorch Version (e.g., 1.0): 1.5.0\r\n - OS (e.g., Linux): Linux\r\n - How you installed PyTorch (`conda`, `pip`, source): pip\r\n - Build command you used (if compiling from source):\r\n - Python version: 3.7\r\n - CUDA/cuDNN version: 10.1\r\n - GPU models and configuration: Tesla P100-PCIE-16GB\r\n - Any other relevant information:\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. -->\r\n\n", "before_files": [{"content": "\"\"\"\nWeights and Biases\n------------------\n\"\"\"\nimport os\nfrom argparse import Namespace\nfrom typing import Optional, List, Dict, Union, Any\n\nimport torch.nn as nn\n\ntry:\n import wandb\n from wandb.wandb_run import Run\n _WANDB_AVAILABLE = True\nexcept ImportError: # pragma: no-cover\n wandb = None\n Run = None\n _WANDB_AVAILABLE = False\n\nfrom pytorch_lightning.loggers.base import LightningLoggerBase\nfrom pytorch_lightning.utilities import rank_zero_only\n\n\nclass WandbLogger(LightningLoggerBase):\n \"\"\"\n Log using `Weights and Biases <https://www.wandb.com/>`_. Install it with pip:\n\n .. code-block:: bash\n\n pip install wandb\n\n Args:\n name: Display name for the run.\n save_dir: Path where data is saved.\n offline: Run offline (data can be streamed later to wandb servers).\n id: Sets the version, mainly used to resume a previous run.\n anonymous: Enables or explicitly disables anonymous logging.\n version: Sets the version, mainly used to resume a previous run.\n project: The name of the project to which this run will belong.\n tags: Tags associated with this run.\n log_model: Save checkpoints in wandb dir to upload on W&B servers.\n experiment: WandB experiment object\n entity: The team posting this run (default: your username or your default team)\n group: A unique string shared by all runs in a given group\n\n Example:\n >>> from pytorch_lightning.loggers import WandbLogger\n >>> from pytorch_lightning import Trainer\n >>> wandb_logger = WandbLogger()\n >>> trainer = Trainer(logger=wandb_logger)\n\n See Also:\n - `Tutorial <https://app.wandb.ai/cayush/pytorchlightning/reports/\n Use-Pytorch-Lightning-with-Weights-%26-Biases--Vmlldzo2NjQ1Mw>`__\n on how to use W&B with Pytorch Lightning.\n\n \"\"\"\n\n def __init__(self,\n name: Optional[str] = None,\n save_dir: Optional[str] = None,\n offline: bool = False,\n id: Optional[str] = None,\n anonymous: bool = False,\n version: Optional[str] = None,\n project: Optional[str] = None,\n tags: Optional[List[str]] = None,\n log_model: bool = False,\n experiment=None,\n entity=None,\n group: Optional[str] = None):\n if not _WANDB_AVAILABLE:\n raise ImportError('You want to use `wandb` logger which is not installed yet,' # pragma: no-cover\n ' install it with `pip install wandb`.')\n super().__init__()\n self._name = name\n self._save_dir = save_dir\n self._anonymous = 'allow' if anonymous else None\n self._id = version or id\n self._tags = tags\n self._project = project\n self._experiment = experiment\n self._offline = offline\n 
self._entity = entity\n self._log_model = log_model\n self._group = group\n\n def __getstate__(self):\n state = self.__dict__.copy()\n # args needed to reload correct experiment\n state['_id'] = self._experiment.id if self._experiment is not None else None\n\n # cannot be pickled\n state['_experiment'] = None\n return state\n\n @property\n def experiment(self) -> Run:\n r\"\"\"\n\n Actual wandb object. To use wandb features in your\n :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.\n\n Example::\n\n self.logger.experiment.some_wandb_function()\n\n \"\"\"\n if self._experiment is None:\n if self._offline:\n os.environ['WANDB_MODE'] = 'dryrun'\n self._experiment = wandb.init(\n name=self._name, dir=self._save_dir, project=self._project, anonymous=self._anonymous,\n reinit=True, id=self._id, resume='allow', tags=self._tags, entity=self._entity,\n group=self._group)\n # save checkpoints in wandb dir to upload on W&B servers\n if self._log_model:\n self.save_dir = self._experiment.dir\n return self._experiment\n\n def watch(self, model: nn.Module, log: str = 'gradients', log_freq: int = 100):\n self.experiment.watch(model, log=log, log_freq=log_freq)\n\n @rank_zero_only\n def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:\n params = self._convert_params(params)\n self.experiment.config.update(params, allow_val_change=True)\n\n @rank_zero_only\n def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:\n self.experiment.log(metrics, step=step)\n\n @property\n def name(self) -> str:\n # don't create an experiment if we don't have one\n name = self._experiment.project_name() if self._experiment else None\n return name\n\n @property\n def version(self) -> str:\n # don't create an experiment if we don't have one\n return self._experiment.id if self._experiment else None\n", "path": "pytorch_lightning/loggers/wandb.py"}]} | 3,060 | 145 |
gh_patches_debug_19841 | rasdani/github-patches | git_diff | svthalia__concrexit-3648 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Rate-limit and stricter validation on member registration view
See the responsible disclosure email. https://staging.thalia.nu/association/register/member/ triggers sending an email. We should rate-limit it quite strictly.
This email also contains information (name, more?) provided by the user. While that information is properly HTML-escaped, some mail clients render URLs in it as clickable links. Inserting newlines, for example, may also be possible, so someone could at least make the emails ugly.
So it wouldn't hurt to perform some extra validation of names.
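A minimal sketch of what strict rate limiting on this view could look like, assuming `django-ratelimit` is available; the key and rate below are placeholders, not a decided policy:

```python
# Sketch: throttle POSTs to the registration form view by client IP.
from django.utils.decorators import method_decorator
from django.views.generic import FormView
from django_ratelimit.decorators import ratelimit


class BaseRegistrationFormView(FormView):
    # ... existing form_class / template_name configuration ...

    @method_decorator(ratelimit(key="ip", rate="10/d"))
    def post(self, request, *args, **kwargs):
        return super().post(request, *args, **kwargs)
```

Stricter name validation could additionally be handled with a form or model validator (for example rejecting newlines), independent of the rate limit.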
</issue>
<code>
[start of website/registrations/views.py]
1 """Views provided by the registrations package."""
2 from django.conf import settings
3 from django.contrib import messages
4 from django.contrib.admin.views.decorators import staff_member_required
5 from django.contrib.auth.decorators import login_required, permission_required
6 from django.db.models import Q
7 from django.http import Http404
8 from django.shortcuts import get_object_or_404, redirect
9 from django.template.defaultfilters import floatformat
10 from django.urls import reverse
11 from django.utils import timezone
12 from django.utils.decorators import method_decorator
13 from django.views import View
14 from django.views.generic import CreateView, FormView
15 from django.views.generic.base import TemplateResponseMixin, TemplateView
16
17 from members.decorators import membership_required
18 from members.models import Membership
19
20 from . import emails, forms, services
21 from .models import Entry, Reference, Registration, Renewal
22
23
24 class BecomeAMemberView(TemplateView):
25 """View that render a HTML template with context data."""
26
27 template_name = "registrations/become_a_member.html"
28
29 def get_context_data(self, **kwargs):
30 context = super().get_context_data(**kwargs)
31 context["year_fees"] = floatformat(
32 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2
33 )
34 context["study_fees"] = floatformat(
35 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2
36 )
37 return context
38
39
40 @method_decorator(staff_member_required, name="dispatch")
41 @method_decorator(
42 permission_required("registrations.review_entries"),
43 name="dispatch",
44 )
45 class EntryAdminView(View):
46 """View that handles the processing of entries."""
47
48 def post(self, request, *args, **kwargs):
49 action = request.POST.get("action")
50 entry = get_object_or_404(Entry, pk=kwargs["pk"])
51
52 registration = getattr(entry, "registration", None)
53 renewal = getattr(entry, "renewal", None)
54
55 if action == "accept":
56 if registration is not None:
57 if not registration.check_user_is_unique():
58 messages.error(
59 request,
60 f"Could not accept {registration}. Username or email is not unique.",
61 )
62 else:
63 services.accept_registration(registration, actor=request.user)
64 messages.success(request, f"Successfully accepted {registration}.")
65 elif renewal is not None: # pragma: no cover
66 services.accept_renewal(renewal, actor=request.user)
67 messages.success(request, f"Successfully accepted {renewal}.")
68 elif action == "reject":
69 if registration is not None:
70 services.reject_registration(registration, actor=request.user)
71 messages.success(request, f"Successfully rejected {registration}.")
72 elif renewal is not None: # pragma: no cover
73 services.reject_renewal(renewal, actor=request.user)
74 messages.success(request, f"Successfully rejected {renewal}.")
75 elif action == "resend":
76 if registration is not None:
77 emails.send_registration_email_confirmation(entry.registration)
78 messages.success(
79 request, f"Resent registration email of {registration}."
80 )
81 else:
82 messages.error(request, "Cannot resend renewal.")
83 elif action == "revert": # pragma: no cover
84 if registration is not None:
85 services.revert_registration(registration, actor=request.user)
86 messages.success(
87 request, f"Successfully reverted registration {registration}."
88 )
89 elif renewal is not None:
90 services.revert_renewal(renewal, actor=request.user)
91 messages.success(request, f"Successfully reverted renewal {renewal}.")
92
93 redirect_model = "registration" if registration is not None else "renewal"
94 return redirect(f"admin:registrations_{redirect_model}_change", kwargs["pk"])
95
96
97 class ConfirmEmailView(View, TemplateResponseMixin):
98 """View that confirms the email address of the provided registration."""
99
100 template_name = "registrations/confirm_email.html"
101
102 def get(self, request, *args, **kwargs):
103 registration = get_object_or_404(Registration, pk=kwargs["pk"])
104
105 if registration.status == Registration.STATUS_CONFIRM:
106 services.confirm_registration(registration)
107
108 if registration.status != Registration.STATUS_REVIEW:
109 raise Http404
110
111 return self.render_to_response({})
112
113
114 class BaseRegistrationFormView(FormView):
115 """View that renders a membership registration form."""
116
117 form_class = forms.MemberRegistrationForm
118 template_name = "registrations/register_member.html"
119
120 def get_context_data(self, **kwargs):
121 context = super().get_context_data(**kwargs)
122 context["google_api_key"] = settings.GOOGLE_PLACES_API_KEY
123 context["year_fees"] = floatformat(
124 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2
125 )
126 context["study_fees"] = floatformat(
127 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2
128 )
129 return context
130
131 def get(self, request, *args, **kwargs):
132 if request.user.is_authenticated:
133 return redirect("registrations:renew")
134 return super().get(request, args, kwargs)
135
136 def form_valid(self, form):
137 form.save()
138 emails.send_registration_email_confirmation(form.instance)
139 return redirect("registrations:register-success")
140
141
142 class MemberRegistrationFormView(BaseRegistrationFormView):
143 """View that renders the `member` membership registration form."""
144
145 form_class = forms.MemberRegistrationForm
146 template_name = "registrations/register_member.html"
147
148 def get_context_data(self, **kwargs):
149 context = super().get_context_data(**kwargs)
150 context["tpay_enabled"] = (
151 settings.THALIA_PAY_ENABLED_PAYMENT_METHOD
152 and settings.THALIA_PAY_FOR_NEW_MEMBERS
153 )
154 return context
155
156 def post(self, request, *args, **kwargs):
157 request.POST = request.POST.dict()
158 request.POST["language"] = request.LANGUAGE_CODE
159 request.POST["membership_type"] = Membership.MEMBER
160 return super().post(request, *args, **kwargs)
161
162
163 class BenefactorRegistrationFormView(BaseRegistrationFormView):
164 """View that renders the `benefactor` membership registration form."""
165
166 form_class = forms.BenefactorRegistrationForm
167 template_name = "registrations/register_benefactor.html"
168
169 def get_context_data(self, **kwargs):
170 context = super().get_context_data(**kwargs)
171 context["tpay_enabled"] = (
172 settings.THALIA_PAY_ENABLED_PAYMENT_METHOD
173 and settings.THALIA_PAY_FOR_NEW_MEMBERS
174 )
175 return context
176
177 def post(self, request, *args, **kwargs):
178 request.POST = request.POST.dict()
179 request.POST["language"] = request.LANGUAGE_CODE
180 request.POST["membership_type"] = Membership.BENEFACTOR
181 request.POST["length"] = Entry.MEMBERSHIP_YEAR
182 request.POST["remarks"] = (
183 "Registered as iCIS employee" if "icis_employee" in request.POST else ""
184 )
185 request.POST["no_references"] = "icis_employee" in request.POST
186 return super().post(request, *args, **kwargs)
187
188
189 @method_decorator(login_required, name="dispatch")
190 class RenewalFormView(FormView):
191 """View that renders the membership renewal form."""
192
193 form_class = forms.RenewalForm
194 template_name = "registrations/renewal.html"
195
196 def get_context_data(self, **kwargs):
197 context = super().get_context_data(**kwargs)
198 context["year_fees"] = floatformat(
199 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2
200 )
201 context["study_fees"] = floatformat(
202 settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2
203 )
204 context["latest_membership"] = self.request.member.latest_membership
205 context["latest_renewal"] = Renewal.objects.filter(
206 Q(member=self.request.member)
207 & (
208 Q(status=Registration.STATUS_ACCEPTED)
209 | Q(status=Registration.STATUS_REVIEW)
210 )
211 ).last()
212 context["was_member"] = Membership.objects.filter(
213 user=self.request.member, type=Membership.MEMBER
214 ).exists()
215
216 context["benefactor_type"] = Membership.BENEFACTOR
217 return context
218
219 def get_form(self, form_class=None):
220 form = super().get_form(form_class)
221 member = self.request.member
222 if member is not None and member.latest_membership is not None:
223 latest_membership = member.latest_membership
224 # If latest membership has not ended or does not ends
225 # within 1 month: do not show 'year' length and disable benefactor option
226 hide_year_choice = not (
227 latest_membership is not None
228 and latest_membership.until is not None
229 and (latest_membership.until - timezone.now().date()).days <= 31
230 )
231
232 if hide_year_choice:
233 form.fields["length"].choices = [
234 c
235 for c in form.fields["length"].choices
236 if c[0] != Entry.MEMBERSHIP_YEAR
237 ]
238 form.fields["membership_type"].choices = [
239 c
240 for c in form.fields["membership_type"].choices
241 if c[0] != Membership.BENEFACTOR
242 ]
243
244 return form
245
246 def post(self, request, *args, **kwargs):
247 request.POST = request.POST.dict()
248 if request.member.latest_membership.type == Membership.BENEFACTOR:
249 request.POST["membership_type"] = Membership.BENEFACTOR
250 request.POST["length"] = Entry.MEMBERSHIP_YEAR
251 request.POST["member"] = request.member.pk
252 request.POST["remarks"] = ""
253 request.POST["no_references"] = True
254
255 if request.POST["membership_type"] == Membership.BENEFACTOR:
256 request.POST["no_references"] = False
257 if Membership.objects.filter(
258 user=request.member, type=Membership.MEMBER
259 ).exists():
260 request.POST["remarks"] = "Was a Thalia member in the past."
261 request.POST["no_references"] = True
262 if "icis_employee" in request.POST:
263 request.POST["remarks"] = "Registered as iCIS employee."
264 request.POST["no_references"] = True
265
266 return super().post(request, *args, **kwargs)
267
268 def form_valid(self, form):
269 renewal = form.save()
270 if not renewal.no_references:
271 emails.send_references_information_message(renewal)
272 emails.send_new_renewal_board_message(renewal)
273 return redirect("registrations:renew-success")
274
275
276 @method_decorator(login_required, name="dispatch")
277 @method_decorator(membership_required, name="dispatch")
278 class ReferenceCreateView(CreateView):
279 """View that renders a reference creation form."""
280
281 model = Reference
282 form_class = forms.ReferenceForm
283 template_name = "registrations/reference.html"
284 entry = None
285 success = False
286
287 def get_success_url(self):
288 return reverse("registrations:reference-success", args=(self.entry.pk,))
289
290 def get_context_data(self, **kwargs):
291 context = super().get_context_data(**kwargs)
292
293 context["success"] = self.success
294 try:
295 context["name"] = self.entry.registration.get_full_name()
296 except Registration.DoesNotExist:
297 context["name"] = self.entry.renewal.member.get_full_name()
298
299 return context
300
301 def dispatch(self, request, *args, **kwargs):
302 self.entry = get_object_or_404(Entry, pk=kwargs.get("pk"))
303
304 if (
305 self.entry.no_references
306 or self.entry.membership_type != Membership.BENEFACTOR
307 ):
308 raise Http404
309
310 return super().dispatch(request, *args, **kwargs)
311
312 def post(self, request, *args, **kwargs):
313 request.POST = request.POST.dict()
314 request.POST["member"] = request.member.pk
315 request.POST["entry"] = kwargs["pk"]
316 return super().post(request, *args, **kwargs)
317
[end of website/registrations/views.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/website/registrations/views.py b/website/registrations/views.py
--- a/website/registrations/views.py
+++ b/website/registrations/views.py
@@ -14,6 +14,8 @@
from django.views.generic import CreateView, FormView
from django.views.generic.base import TemplateResponseMixin, TemplateView
+from django_ratelimit.decorators import ratelimit
+
from members.decorators import membership_required
from members.models import Membership
@@ -138,6 +140,10 @@
emails.send_registration_email_confirmation(form.instance)
return redirect("registrations:register-success")
+ @method_decorator(ratelimit(key="ip", rate="10/d"))
+ def post(self, request, *args, **kwargs):
+ return super().post(request, *args, **kwargs)
+
class MemberRegistrationFormView(BaseRegistrationFormView):
"""View that renders the `member` membership registration form."""
| {"golden_diff": "diff --git a/website/registrations/views.py b/website/registrations/views.py\n--- a/website/registrations/views.py\n+++ b/website/registrations/views.py\n@@ -14,6 +14,8 @@\n from django.views.generic import CreateView, FormView\n from django.views.generic.base import TemplateResponseMixin, TemplateView\n \n+from django_ratelimit.decorators import ratelimit\n+\n from members.decorators import membership_required\n from members.models import Membership\n \n@@ -138,6 +140,10 @@\n emails.send_registration_email_confirmation(form.instance)\n return redirect(\"registrations:register-success\")\n \n+ @method_decorator(ratelimit(key=\"ip\", rate=\"10/d\"))\n+ def post(self, request, *args, **kwargs):\n+ return super().post(request, *args, **kwargs)\n+\n \n class MemberRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `member` membership registration form.\"\"\"\n", "issue": "Rate-limit and stricter validation on member registration view\nSee responsible disclosure email. https://staging.thalia.nu/association/register/member/ triggers sending an email. We should ratelimit it quite strictly. \r\n\r\nThis email also contains information (name, more?) provided by the user. While that information is properly HTML-escaped, some mail clients do render urls in it as clickable. And for example inserting newlines may be possible, so that someone could at least make the emails ugly. \r\nSo it wouldn't hurt to perform some extra validation of names. \n", "before_files": [{"content": "\"\"\"Views provided by the registrations package.\"\"\"\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.db.models import Q\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.defaultfilters import floatformat\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.generic import CreateView, FormView\nfrom django.views.generic.base import TemplateResponseMixin, TemplateView\n\nfrom members.decorators import membership_required\nfrom members.models import Membership\n\nfrom . 
import emails, forms, services\nfrom .models import Entry, Reference, Registration, Renewal\n\n\nclass BecomeAMemberView(TemplateView):\n \"\"\"View that render a HTML template with context data.\"\"\"\n\n template_name = \"registrations/become_a_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n\n@method_decorator(staff_member_required, name=\"dispatch\")\n@method_decorator(\n permission_required(\"registrations.review_entries\"),\n name=\"dispatch\",\n)\nclass EntryAdminView(View):\n \"\"\"View that handles the processing of entries.\"\"\"\n\n def post(self, request, *args, **kwargs):\n action = request.POST.get(\"action\")\n entry = get_object_or_404(Entry, pk=kwargs[\"pk\"])\n\n registration = getattr(entry, \"registration\", None)\n renewal = getattr(entry, \"renewal\", None)\n\n if action == \"accept\":\n if registration is not None:\n if not registration.check_user_is_unique():\n messages.error(\n request,\n f\"Could not accept {registration}. Username or email is not unique.\",\n )\n else:\n services.accept_registration(registration, actor=request.user)\n messages.success(request, f\"Successfully accepted {registration}.\")\n elif renewal is not None: # pragma: no cover\n services.accept_renewal(renewal, actor=request.user)\n messages.success(request, f\"Successfully accepted {renewal}.\")\n elif action == \"reject\":\n if registration is not None:\n services.reject_registration(registration, actor=request.user)\n messages.success(request, f\"Successfully rejected {registration}.\")\n elif renewal is not None: # pragma: no cover\n services.reject_renewal(renewal, actor=request.user)\n messages.success(request, f\"Successfully rejected {renewal}.\")\n elif action == \"resend\":\n if registration is not None:\n emails.send_registration_email_confirmation(entry.registration)\n messages.success(\n request, f\"Resent registration email of {registration}.\"\n )\n else:\n messages.error(request, \"Cannot resend renewal.\")\n elif action == \"revert\": # pragma: no cover\n if registration is not None:\n services.revert_registration(registration, actor=request.user)\n messages.success(\n request, f\"Successfully reverted registration {registration}.\"\n )\n elif renewal is not None:\n services.revert_renewal(renewal, actor=request.user)\n messages.success(request, f\"Successfully reverted renewal {renewal}.\")\n\n redirect_model = \"registration\" if registration is not None else \"renewal\"\n return redirect(f\"admin:registrations_{redirect_model}_change\", kwargs[\"pk\"])\n\n\nclass ConfirmEmailView(View, TemplateResponseMixin):\n \"\"\"View that confirms the email address of the provided registration.\"\"\"\n\n template_name = \"registrations/confirm_email.html\"\n\n def get(self, request, *args, **kwargs):\n registration = get_object_or_404(Registration, pk=kwargs[\"pk\"])\n\n if registration.status == Registration.STATUS_CONFIRM:\n services.confirm_registration(registration)\n\n if registration.status != Registration.STATUS_REVIEW:\n raise Http404\n\n return self.render_to_response({})\n\n\nclass BaseRegistrationFormView(FormView):\n \"\"\"View that renders a membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def 
get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"google_api_key\"] = settings.GOOGLE_PLACES_API_KEY\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n return context\n\n def get(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return redirect(\"registrations:renew\")\n return super().get(request, args, kwargs)\n\n def form_valid(self, form):\n form.save()\n emails.send_registration_email_confirmation(form.instance)\n return redirect(\"registrations:register-success\")\n\n\nclass MemberRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `member` membership registration form.\"\"\"\n\n form_class = forms.MemberRegistrationForm\n template_name = \"registrations/register_member.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.MEMBER\n return super().post(request, *args, **kwargs)\n\n\nclass BenefactorRegistrationFormView(BaseRegistrationFormView):\n \"\"\"View that renders the `benefactor` membership registration form.\"\"\"\n\n form_class = forms.BenefactorRegistrationForm\n template_name = \"registrations/register_benefactor.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"tpay_enabled\"] = (\n settings.THALIA_PAY_ENABLED_PAYMENT_METHOD\n and settings.THALIA_PAY_FOR_NEW_MEMBERS\n )\n return context\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"language\"] = request.LANGUAGE_CODE\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"remarks\"] = (\n \"Registered as iCIS employee\" if \"icis_employee\" in request.POST else \"\"\n )\n request.POST[\"no_references\"] = \"icis_employee\" in request.POST\n return super().post(request, *args, **kwargs)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass RenewalFormView(FormView):\n \"\"\"View that renders the membership renewal form.\"\"\"\n\n form_class = forms.RenewalForm\n template_name = \"registrations/renewal.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_YEAR], 2\n )\n context[\"study_fees\"] = floatformat(\n settings.MEMBERSHIP_PRICES[Entry.MEMBERSHIP_STUDY], 2\n )\n context[\"latest_membership\"] = self.request.member.latest_membership\n context[\"latest_renewal\"] = Renewal.objects.filter(\n Q(member=self.request.member)\n & (\n Q(status=Registration.STATUS_ACCEPTED)\n | Q(status=Registration.STATUS_REVIEW)\n )\n ).last()\n context[\"was_member\"] = Membership.objects.filter(\n user=self.request.member, type=Membership.MEMBER\n ).exists()\n\n context[\"benefactor_type\"] = Membership.BENEFACTOR\n return context\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n member = self.request.member\n if member is not None and member.latest_membership is not None:\n latest_membership = 
member.latest_membership\n # If latest membership has not ended or does not ends\n # within 1 month: do not show 'year' length and disable benefactor option\n hide_year_choice = not (\n latest_membership is not None\n and latest_membership.until is not None\n and (latest_membership.until - timezone.now().date()).days <= 31\n )\n\n if hide_year_choice:\n form.fields[\"length\"].choices = [\n c\n for c in form.fields[\"length\"].choices\n if c[0] != Entry.MEMBERSHIP_YEAR\n ]\n form.fields[\"membership_type\"].choices = [\n c\n for c in form.fields[\"membership_type\"].choices\n if c[0] != Membership.BENEFACTOR\n ]\n\n return form\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n if request.member.latest_membership.type == Membership.BENEFACTOR:\n request.POST[\"membership_type\"] = Membership.BENEFACTOR\n request.POST[\"length\"] = Entry.MEMBERSHIP_YEAR\n request.POST[\"member\"] = request.member.pk\n request.POST[\"remarks\"] = \"\"\n request.POST[\"no_references\"] = True\n\n if request.POST[\"membership_type\"] == Membership.BENEFACTOR:\n request.POST[\"no_references\"] = False\n if Membership.objects.filter(\n user=request.member, type=Membership.MEMBER\n ).exists():\n request.POST[\"remarks\"] = \"Was a Thalia member in the past.\"\n request.POST[\"no_references\"] = True\n if \"icis_employee\" in request.POST:\n request.POST[\"remarks\"] = \"Registered as iCIS employee.\"\n request.POST[\"no_references\"] = True\n\n return super().post(request, *args, **kwargs)\n\n def form_valid(self, form):\n renewal = form.save()\n if not renewal.no_references:\n emails.send_references_information_message(renewal)\n emails.send_new_renewal_board_message(renewal)\n return redirect(\"registrations:renew-success\")\n\n\n@method_decorator(login_required, name=\"dispatch\")\n@method_decorator(membership_required, name=\"dispatch\")\nclass ReferenceCreateView(CreateView):\n \"\"\"View that renders a reference creation form.\"\"\"\n\n model = Reference\n form_class = forms.ReferenceForm\n template_name = \"registrations/reference.html\"\n entry = None\n success = False\n\n def get_success_url(self):\n return reverse(\"registrations:reference-success\", args=(self.entry.pk,))\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"success\"] = self.success\n try:\n context[\"name\"] = self.entry.registration.get_full_name()\n except Registration.DoesNotExist:\n context[\"name\"] = self.entry.renewal.member.get_full_name()\n\n return context\n\n def dispatch(self, request, *args, **kwargs):\n self.entry = get_object_or_404(Entry, pk=kwargs.get(\"pk\"))\n\n if (\n self.entry.no_references\n or self.entry.membership_type != Membership.BENEFACTOR\n ):\n raise Http404\n\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n request.POST = request.POST.dict()\n request.POST[\"member\"] = request.member.pk\n request.POST[\"entry\"] = kwargs[\"pk\"]\n return super().post(request, *args, **kwargs)\n", "path": "website/registrations/views.py"}]} | 4,077 | 208 |
gh_patches_debug_10225 | rasdani/github-patches | git_diff | wagtail__wagtail-822 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Unicode content in rich text causes error
Steps to reproduce:
1. Using the wagtail interface, create a new instance of a page-derived model which has a rich text field
2. In the rich text field, include unicode characters such as: `©` or `’`
3. Publish the page
The page will be published fine, and probably rendered normally through a template.
4. Return to the wagtail admin, and edit the newly created page
 The rich text field is not rendered on the editing page. Inspecting the HTML shows that wagtail is attempting to render the field (its heading is there), but the field itself is not rendered.
If you attempt to publish this new page, the form will submit a page with no content in the rich text field.
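For what it's worth, the symptom can be reproduced with plain Python 2 string formatting, which is how the widget's script tag is assembled; this is only an illustration of a suspected cause, not a confirmed diagnosis, and the values are made up:

```python
# Python 2 illustration (suspected cause; widget/js contents are invented for the example).
widget = u'<textarea id="id_body">\u00a9 2014</textarea>'  # rendered field HTML containing unicode
js = u'makeRichTextEditable("id_body");'

u'{0}<script>{1}</script>'.format(widget, js)   # works: unicode format string
'{0}<script>{1}</script>'.format(widget, js)    # raises UnicodeEncodeError on the copyright sign
```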
</issue>
<code>
[start of wagtail/utils/widgets.py]
1 from django.forms.widgets import Widget
2 from django.utils.safestring import mark_safe
3
4
5 class WidgetWithScript(Widget):
6 def render(self, name, value, attrs=None):
7 widget = super(WidgetWithScript, self).render(name, value, attrs)
8
9 final_attrs = self.build_attrs(attrs, name=name)
10 id_ = final_attrs.get('id', None)
11 if 'id_' is None:
12 return widget
13
14 js = self.render_js_init(id_, name, value)
15 out = '{0}<script>{1}</script>'.format(widget, js)
16 return mark_safe(out)
17
18 def render_js_init(self, id_, name, value):
19 return ''
20
[end of wagtail/utils/widgets.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/wagtail/utils/widgets.py b/wagtail/utils/widgets.py
--- a/wagtail/utils/widgets.py
+++ b/wagtail/utils/widgets.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import, unicode_literals
+
from django.forms.widgets import Widget
from django.utils.safestring import mark_safe
@@ -8,7 +10,7 @@
final_attrs = self.build_attrs(attrs, name=name)
id_ = final_attrs.get('id', None)
- if 'id_' is None:
+ if id_ is None:
return widget
js = self.render_js_init(id_, name, value)
| {"golden_diff": "diff --git a/wagtail/utils/widgets.py b/wagtail/utils/widgets.py\n--- a/wagtail/utils/widgets.py\n+++ b/wagtail/utils/widgets.py\n@@ -1,3 +1,5 @@\n+from __future__ import absolute_import, unicode_literals\n+\n from django.forms.widgets import Widget\n from django.utils.safestring import mark_safe\n \n@@ -8,7 +10,7 @@\n \n final_attrs = self.build_attrs(attrs, name=name)\n id_ = final_attrs.get('id', None)\n- if 'id_' is None:\n+ if id_ is None:\n return widget\n \n js = self.render_js_init(id_, name, value)\n", "issue": "Unicode content in rich text causes error\nSteps to reproduce:\n1. Using the wagtail interface, create a new instance of a page derived model which has a rich text field\n2. In the rich text field, include unicode characters such as: `\u00a9` or `\u2019`\n3. Publish the page\n \n The page will be published fine, and probably rendered normally through a template.\n4. Return to the wagtail admin, and edit the newly created page\n \n The rich text field is not rendered in the editing page. Inspecting the html shows that wagtail is attempting to render the field, its heading is there. But the field is not rendered.\n \n If you attempt to publish this new page, the form will submit a page with no content in the rich text field.\n\n", "before_files": [{"content": "from django.forms.widgets import Widget\nfrom django.utils.safestring import mark_safe\n\n\nclass WidgetWithScript(Widget):\n def render(self, name, value, attrs=None):\n widget = super(WidgetWithScript, self).render(name, value, attrs)\n\n final_attrs = self.build_attrs(attrs, name=name)\n id_ = final_attrs.get('id', None)\n if 'id_' is None:\n return widget\n\n js = self.render_js_init(id_, name, value)\n out = '{0}<script>{1}</script>'.format(widget, js)\n return mark_safe(out)\n\n def render_js_init(self, id_, name, value):\n return ''\n", "path": "wagtail/utils/widgets.py"}]} | 875 | 146 |
gh_patches_debug_4403 | rasdani/github-patches | git_diff | learningequality__kolibri-5037 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Facing error while kolibri 0.12.0 deb file installation
### Observed behavior
After running the command below, it shows an error:
**sudo dpkg -i kolibri_0.12.0b4-0ubuntu1_all.deb**
The file was downloaded from https://github.com/learningequality/kolibri/releases.

### Context
Kolibri version : Kolibri 0.12.0
Operating system : Ubuntu 14.04
### Screenshots:


</issue>
<code>
[start of kolibri/__init__.py]
1 """
2 CAUTION! Keep everything here at at minimum. Do not import stuff.
3 This module is imported in setup.py, so you cannot for instance
4 import a dependency.
5 """
6 from __future__ import absolute_import
7 from __future__ import print_function
8 from __future__ import unicode_literals
9
10 from .utils import env
11 from .utils.version import get_version
12
13 # Setup the environment before loading anything else from the application
14 env.set_env()
15
16 #: This may not be the exact version as it's subject to modification with
17 #: get_version() - use ``kolibri.__version__`` for the exact version string.
18 VERSION = (0, 12, 0, 'alpha', 0)
19
20 __author__ = 'Learning Equality'
21 __email__ = '[email protected]'
22 __version__ = str(get_version(VERSION))
23
[end of kolibri/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/kolibri/__init__.py b/kolibri/__init__.py
--- a/kolibri/__init__.py
+++ b/kolibri/__init__.py
@@ -15,7 +15,7 @@
#: This may not be the exact version as it's subject to modification with
#: get_version() - use ``kolibri.__version__`` for the exact version string.
-VERSION = (0, 12, 0, 'alpha', 0)
+VERSION = (0, 12, 0, 'beta', 0)
__author__ = 'Learning Equality'
__email__ = '[email protected]'
| {"golden_diff": "diff --git a/kolibri/__init__.py b/kolibri/__init__.py\n--- a/kolibri/__init__.py\n+++ b/kolibri/__init__.py\n@@ -15,7 +15,7 @@\n \n #: This may not be the exact version as it's subject to modification with\n #: get_version() - use ``kolibri.__version__`` for the exact version string.\n-VERSION = (0, 12, 0, 'alpha', 0)\n+VERSION = (0, 12, 0, 'beta', 0)\n \n __author__ = 'Learning Equality'\n __email__ = '[email protected]'\n", "issue": "Facing error while kolibri 0.12.0 deb file installation\n### Observed behavior\r\nAfter running below command it shows error:\r\n**sudo dpkg -i kolibri_0.12.0b4-0ubuntu1_all.deb**\r\n\r\nFile downloaded from https://github.com/learningequality/kolibri/releases.\r\n\r\n\r\n\r\n### Context\r\nKolibri version : Kolibri 0.12.0\r\nOperating system : Ubuntu 14.04\r\n\r\n### Screenshots:\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCAUTION! Keep everything here at at minimum. Do not import stuff.\nThis module is imported in setup.py, so you cannot for instance\nimport a dependency.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom .utils import env\nfrom .utils.version import get_version\n\n# Setup the environment before loading anything else from the application\nenv.set_env()\n\n#: This may not be the exact version as it's subject to modification with\n#: get_version() - use ``kolibri.__version__`` for the exact version string.\nVERSION = (0, 12, 0, 'alpha', 0)\n\n__author__ = 'Learning Equality'\n__email__ = '[email protected]'\n__version__ = str(get_version(VERSION))\n", "path": "kolibri/__init__.py"}]} | 1,043 | 146 |
gh_patches_debug_37466 | rasdani/github-patches | git_diff | holoviz__panel-5427 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Serialization Error When Visualizing PyDeck TextLayer
#### ALL software version info
PyDeck 0.8, Panel 1.2
#### Description of expected behavior and the observed behavior
I recently coded up a variation of the PyDeck TextLayer example [from the PyDeck website](https://deckgl.readthedocs.io/en/latest/gallery/text_layer.html) and then attempted to integrate that into a Panel application. Panel is having a serialization issue with the PyDeck String type.
#### Complete, minimal, self-contained example code that reproduces the issue
[This Google Colab notebook](https://colab.research.google.com/drive/13ziK2OrM7n6zmqQDKJQ1_fvrFCCj2oGf?usp=sharing) shows the visualization running without issue in PyDeck and then shows my attempt to render that same visualization via Panel.
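For convenience, here is a minimal inline sketch of the same idea; the data, view state and styling are placeholders rather than the notebook's exact code:

```python
# Sketch: a pydeck TextLayer using pydeck.types.String, wrapped in a Panel pane.
import pandas as pd
import panel as pn
import pydeck as pdk
from pydeck.types import String

pn.extension('deckgl')

df = pd.DataFrame({
    "name": ["A", "B"],
    "coordinates": [[-122.40, 37.77], [-122.41, 37.78]],
})

layer = pdk.Layer(
    "TextLayer",
    df,
    get_position="coordinates",
    get_text="name",
    get_size=16,
    get_text_anchor=String("middle"),  # the pydeck String type mentioned above
)
deck = pdk.Deck(
    layers=[layer],
    initial_view_state=pdk.ViewState(latitude=37.77, longitude=-122.40, zoom=11),
)

pn.pane.DeckGL(deck, height=400)  # triggers the serialization error described above
```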
</issue>
<code>
[start of panel/pane/deckgl.py]
1 """
2 Defines a PyDeck Pane which renders a PyDeck plot using a PyDeckPlot
3 bokeh model.
4 """
5 from __future__ import annotations
6
7 import json
8 import sys
9
10 from collections import defaultdict
11 from typing import (
12 TYPE_CHECKING, Any, ClassVar, Dict, Mapping, Optional,
13 )
14
15 import numpy as np
16 import param
17
18 from bokeh.models import ColumnDataSource
19 from pyviz_comms import JupyterComm
20
21 from ..util import is_dataframe, lazy_load
22 from .base import ModelPane
23
24 if TYPE_CHECKING:
25 from bokeh.document import Document
26 from bokeh.model import Model
27 from pyviz_comms import Comm
28
29
30 def lower_camel_case_keys(attrs):
31 """
32 Makes all the keys in a dictionary camel-cased and lower-case
33
34 Parameters
35 ----------
36 attrs : dict
37 Dictionary for which all the keys should be converted to camel-case
38 """
39 for snake_key in list(attrs.keys()):
40 if '_' not in snake_key:
41 continue
42 camel_key = lower_first_letter(to_camel_case(snake_key))
43 attrs[camel_key] = attrs.pop(snake_key)
44
45
46 def to_camel_case(snake_case: str) -> str:
47 """
48 Makes a snake case string into a camel case one
49
50 Parameters
51 -----------
52 snake_case : str
53 Snake-cased string (e.g., "snake_cased") to be converted to camel-case (e.g., "camelCase")
54 """
55 output_str = ''
56 should_upper_case = False
57 for c in snake_case:
58 if c == '_':
59 should_upper_case = True
60 continue
61 output_str = output_str + c.upper() if should_upper_case else output_str + c
62 should_upper_case = False
63 return output_str
64
65
66 def lower_first_letter(s: str) -> str:
67 return s[:1].lower() + s[1:] if s else ''
68
69
70 def recurse_data(data):
71 if hasattr(data, 'to_json'):
72 data = data.__dict__
73 if isinstance(data, dict):
74 data = dict(data)
75 lower_camel_case_keys(data)
76 data = {k: recurse_data(v) if k != 'data' else v
77 for k, v in data.items()}
78 elif isinstance(data, list):
79 data = [recurse_data(d) for d in data]
80 return data
81
82
83 class DeckGL(ModelPane):
84 """
85 The `DeckGL` pane renders the Deck.gl
86 JSON specification as well as PyDeck plots inside a panel.
87
88 Deck.gl is a very powerful WebGL-powered framework for visual exploratory
89 data analysis of large datasets.
90
91 Reference: https://panel.holoviz.org/reference/panes/DeckGL.html
92
93 :Example:
94
95 >>> pn.extension('deckgl')
96 >>> DeckGL(
97 ... some_deckgl_dict_or_pydeck_object,
98 ... mapbox_api_key=MAPBOX_KEY, height=600
99 ... )
100 """
101
102 mapbox_api_key = param.String(default=None, doc="""
103 The MapBox API key if not supplied by a PyDeck object.""")
104
105 tooltips = param.ClassSelector(default=True, class_=(bool, dict), doc="""
106 Whether to enable tooltips""")
107
108 click_state = param.Dict(default={}, doc="""
109 Contains the last click event on the DeckGL plot.""")
110
111 hover_state = param.Dict(default={}, doc="""
112 The current hover state of the DeckGL plot.""")
113
114 view_state = param.Dict(default={}, doc="""
115 The current view state of the DeckGL plot.""")
116
117 throttle = param.Dict(default={'view': 200, 'hover': 200}, doc="""
118 Throttling timeout (in milliseconds) for view state and hover
119 events sent from the frontend.""")
120
121 _rename: ClassVar[Mapping[str, str | None]] = {
122 'click_state': 'clickState', 'hover_state': 'hoverState',
123 'view_state': 'viewState', 'tooltips': 'tooltip'
124 }
125
126 _updates: ClassVar[bool] = True
127
128 priority: ClassVar[float | bool | None] = None
129
130 @classmethod
131 def applies(cls, obj: Any) -> float | bool | None:
132 if cls.is_pydeck(obj):
133 return 0.8
134 elif isinstance(obj, (dict, str)):
135 return 0
136 return False
137
138 @classmethod
139 def is_pydeck(cls, obj):
140 if 'pydeck' in sys.modules:
141 import pydeck
142 return isinstance(obj, pydeck.bindings.deck.Deck)
143 return False
144
145 @classmethod
146 def _process_data(cls, data):
147 columns = defaultdict(list)
148 for d in data:
149 for col, val in d.items():
150 columns[col].append(val)
151 return {col: np.asarray(vals) for col, vals in columns.items()}
152
153 @classmethod
154 def _update_sources(cls, json_data, sources):
155 layers = json_data.get('layers', [])
156
157 # Create index of sources by columns
158 source_columns = defaultdict(list)
159 for i, source in enumerate(sources):
160 key = tuple(sorted(source.data.keys()))
161 source_columns[key].append((i, source))
162
163 # Process
164 unprocessed, unused = [], list(sources)
165 for layer in layers:
166 data = layer.get('data')
167 if is_dataframe(data):
168 data = ColumnDataSource.from_df(data)
169 elif (isinstance(data, list) and data
170 and isinstance(data[0], dict)):
171 data = cls._process_data(data)
172 else:
173 continue
174
175 key = tuple(sorted(data.keys()))
176 existing = source_columns.get(key)
177 if existing:
178 index, cds = existing.pop()
179 layer['data'] = index
180 updates = {}
181 for col, values in data.items():
182 if not np.array_equal(data[col], cds.data[col]):
183 updates[col] = values
184 if updates:
185 cds.data.update(updates)
186 unused.remove(cds)
187 else:
188 unprocessed.append((layer, data))
189
190 for layer, data in unprocessed:
191 if unused:
192 cds = unused.pop()
193 cds.data = data
194 else:
195 cds = ColumnDataSource(data)
196 sources.append(cds)
197 layer['data'] = sources.index(cds)
198
199 def _transform_object(self, obj) -> Dict[str, Any]:
200 if self.object is None:
201 data, mapbox_api_key, tooltip = {}, self.mapbox_api_key, self.tooltips
202 elif isinstance(self.object, (str, dict)):
203 if isinstance(self.object, str):
204 data = json.loads(self.object)
205 else:
206 data = dict(self.object)
207 data['layers'] = [dict(layer) for layer in data.get('layers', [])]
208 mapbox_api_key = self.mapbox_api_key
209 tooltip = self.tooltips
210 else:
211 data = dict(self.object.__dict__)
212 mapbox_api_key = data.pop('mapbox_key', self.mapbox_api_key)
213 deck_widget = data.pop('deck_widget', None)
214 if isinstance(self.tooltips, dict) or deck_widget is None:
215 tooltip = self.tooltips
216 else:
217 tooltip = deck_widget.tooltip
218 data = {k: v for k, v in recurse_data(data).items() if v is not None}
219
220 # Delete undefined width and height
221 for view in data.get('views', []):
222 if view.get('width', False) is None:
223 view.pop('width')
224 if view.get('height', False) is None:
225 view.pop('height')
226
227 return dict(data=data, tooltip=tooltip, mapbox_api_key=mapbox_api_key or "")
228
229 def _get_model(
230 self, doc: Document, root: Optional[Model] = None,
231 parent: Optional[Model] = None, comm: Optional[Comm] = None
232 ) -> Model:
233 self._bokeh_model = DeckGLPlot = lazy_load(
234 'panel.models.deckgl', 'DeckGLPlot', isinstance(comm, JupyterComm), root
235 )
236 properties = self._get_properties(doc)
237 data = properties.pop('data')
238 properties['data_sources'] = sources = []
239 self._update_sources(data, sources)
240 properties['layers'] = data.pop('layers', [])
241 properties['initialViewState'] = data.pop('initialViewState', {})
242 model = DeckGLPlot(data=data, **properties)
243 root = root or model
244 self._link_props(model, ['clickState', 'hoverState', 'viewState'], doc, root, comm)
245 self._models[root.ref["id"]] = (model, parent)
246 return model
247
248 def _update(self, ref: str, model: Model) -> None:
249 properties = self._get_properties(model.document)
250 data = properties.pop('data')
251 self._update_sources(data, model.data_sources)
252 properties['data'] = data
253 properties['layers'] = data.pop('layers', [])
254 properties['initialViewState'] = data.pop('initialViewState', {})
255 model.update(**properties)
256
[end of panel/pane/deckgl.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/panel/pane/deckgl.py b/panel/pane/deckgl.py
--- a/panel/pane/deckgl.py
+++ b/panel/pane/deckgl.py
@@ -15,6 +15,7 @@
import numpy as np
import param
+from bokeh.core.serialization import Serializer
from bokeh.models import ColumnDataSource
from pyviz_comms import JupyterComm
@@ -123,6 +124,8 @@
'view_state': 'viewState', 'tooltips': 'tooltip'
}
+ _pydeck_encoders_are_added: ClassVar[bool] = False
+
_updates: ClassVar[bool] = True
priority: ClassVar[float | bool | None] = None
@@ -196,6 +199,36 @@
sources.append(cds)
layer['data'] = sources.index(cds)
+ @classmethod
+ def _add_pydeck_encoders(cls):
+ if cls._pydeck_encoders_are_added or 'pydeck' not in sys.modules:
+ return
+
+ from pydeck.types import String
+ def pydeck_string_encoder(obj, serializer):
+ return obj.value
+
+ Serializer._encoders[String] = pydeck_string_encoder
+
+ def _transform_deck_object(self, obj):
+ data = dict(obj.__dict__)
+ mapbox_api_key = data.pop('mapbox_key', "") or self.mapbox_api_key
+ deck_widget = data.pop('deck_widget', None)
+ if isinstance(self.tooltips, dict) or deck_widget is None:
+ tooltip = self.tooltips
+ else:
+ tooltip = deck_widget.tooltip
+ data = {k: v for k, v in recurse_data(data).items() if v is not None}
+
+ if "initialViewState" in data:
+ data["initialViewState"]={
+ k:v for k, v in data["initialViewState"].items() if v is not None
+ }
+
+ self._add_pydeck_encoders()
+
+ return data, tooltip, mapbox_api_key
+
def _transform_object(self, obj) -> Dict[str, Any]:
if self.object is None:
data, mapbox_api_key, tooltip = {}, self.mapbox_api_key, self.tooltips
@@ -208,14 +241,7 @@
mapbox_api_key = self.mapbox_api_key
tooltip = self.tooltips
else:
- data = dict(self.object.__dict__)
- mapbox_api_key = data.pop('mapbox_key', self.mapbox_api_key)
- deck_widget = data.pop('deck_widget', None)
- if isinstance(self.tooltips, dict) or deck_widget is None:
- tooltip = self.tooltips
- else:
- tooltip = deck_widget.tooltip
- data = {k: v for k, v in recurse_data(data).items() if v is not None}
+ data, tooltip, mapbox_api_key = self._transform_deck_object(self.object)
# Delete undefined width and height
for view in data.get('views', []):
| {"golden_diff": "diff --git a/panel/pane/deckgl.py b/panel/pane/deckgl.py\n--- a/panel/pane/deckgl.py\n+++ b/panel/pane/deckgl.py\n@@ -15,6 +15,7 @@\n import numpy as np\n import param\n \n+from bokeh.core.serialization import Serializer\n from bokeh.models import ColumnDataSource\n from pyviz_comms import JupyterComm\n \n@@ -123,6 +124,8 @@\n 'view_state': 'viewState', 'tooltips': 'tooltip'\n }\n \n+ _pydeck_encoders_are_added: ClassVar[bool] = False\n+\n _updates: ClassVar[bool] = True\n \n priority: ClassVar[float | bool | None] = None\n@@ -196,6 +199,36 @@\n sources.append(cds)\n layer['data'] = sources.index(cds)\n \n+ @classmethod\n+ def _add_pydeck_encoders(cls):\n+ if cls._pydeck_encoders_are_added or 'pydeck' not in sys.modules:\n+ return\n+\n+ from pydeck.types import String\n+ def pydeck_string_encoder(obj, serializer):\n+ return obj.value\n+\n+ Serializer._encoders[String] = pydeck_string_encoder\n+\n+ def _transform_deck_object(self, obj):\n+ data = dict(obj.__dict__)\n+ mapbox_api_key = data.pop('mapbox_key', \"\") or self.mapbox_api_key\n+ deck_widget = data.pop('deck_widget', None)\n+ if isinstance(self.tooltips, dict) or deck_widget is None:\n+ tooltip = self.tooltips\n+ else:\n+ tooltip = deck_widget.tooltip\n+ data = {k: v for k, v in recurse_data(data).items() if v is not None}\n+\n+ if \"initialViewState\" in data:\n+ data[\"initialViewState\"]={\n+ k:v for k, v in data[\"initialViewState\"].items() if v is not None\n+ }\n+\n+ self._add_pydeck_encoders()\n+\n+ return data, tooltip, mapbox_api_key\n+\n def _transform_object(self, obj) -> Dict[str, Any]:\n if self.object is None:\n data, mapbox_api_key, tooltip = {}, self.mapbox_api_key, self.tooltips\n@@ -208,14 +241,7 @@\n mapbox_api_key = self.mapbox_api_key\n tooltip = self.tooltips\n else:\n- data = dict(self.object.__dict__)\n- mapbox_api_key = data.pop('mapbox_key', self.mapbox_api_key)\n- deck_widget = data.pop('deck_widget', None)\n- if isinstance(self.tooltips, dict) or deck_widget is None:\n- tooltip = self.tooltips\n- else:\n- tooltip = deck_widget.tooltip\n- data = {k: v for k, v in recurse_data(data).items() if v is not None}\n+ data, tooltip, mapbox_api_key = self._transform_deck_object(self.object)\n \n # Delete undefined width and height\n for view in data.get('views', []):\n", "issue": "Serialization Error When Visualizing PyDeck TextLayer\n#### ALL software version info\r\nPyDeck 0.8, Panel 1.2\r\n\r\n#### Description of expected behavior and the observed behavior\r\nI recently coded up a variation of the PyDeck TextLayer example [from the PyDeck website](https://deckgl.readthedocs.io/en/latest/gallery/text_layer.html) and then attempted to integrate that into a Panel application. Panel is having a serialization issue with the PyDeck String type. 
\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n[This Google Colab notebook](https://colab.research.google.com/drive/13ziK2OrM7n6zmqQDKJQ1_fvrFCCj2oGf?usp=sharing) shows the visualization running without issue in PyDeck and then shows my attempt to render that same visualization via Panel.\r\n\n", "before_files": [{"content": "\"\"\"\nDefines a PyDeck Pane which renders a PyDeck plot using a PyDeckPlot\nbokeh model.\n\"\"\"\nfrom __future__ import annotations\n\nimport json\nimport sys\n\nfrom collections import defaultdict\nfrom typing import (\n TYPE_CHECKING, Any, ClassVar, Dict, Mapping, Optional,\n)\n\nimport numpy as np\nimport param\n\nfrom bokeh.models import ColumnDataSource\nfrom pyviz_comms import JupyterComm\n\nfrom ..util import is_dataframe, lazy_load\nfrom .base import ModelPane\n\nif TYPE_CHECKING:\n from bokeh.document import Document\n from bokeh.model import Model\n from pyviz_comms import Comm\n\n\ndef lower_camel_case_keys(attrs):\n \"\"\"\n Makes all the keys in a dictionary camel-cased and lower-case\n\n Parameters\n ----------\n attrs : dict\n Dictionary for which all the keys should be converted to camel-case\n \"\"\"\n for snake_key in list(attrs.keys()):\n if '_' not in snake_key:\n continue\n camel_key = lower_first_letter(to_camel_case(snake_key))\n attrs[camel_key] = attrs.pop(snake_key)\n\n\ndef to_camel_case(snake_case: str) -> str:\n \"\"\"\n Makes a snake case string into a camel case one\n\n Parameters\n -----------\n snake_case : str\n Snake-cased string (e.g., \"snake_cased\") to be converted to camel-case (e.g., \"camelCase\")\n \"\"\"\n output_str = ''\n should_upper_case = False\n for c in snake_case:\n if c == '_':\n should_upper_case = True\n continue\n output_str = output_str + c.upper() if should_upper_case else output_str + c\n should_upper_case = False\n return output_str\n\n\ndef lower_first_letter(s: str) -> str:\n return s[:1].lower() + s[1:] if s else ''\n\n\ndef recurse_data(data):\n if hasattr(data, 'to_json'):\n data = data.__dict__\n if isinstance(data, dict):\n data = dict(data)\n lower_camel_case_keys(data)\n data = {k: recurse_data(v) if k != 'data' else v\n for k, v in data.items()}\n elif isinstance(data, list):\n data = [recurse_data(d) for d in data]\n return data\n\n\nclass DeckGL(ModelPane):\n \"\"\"\n The `DeckGL` pane renders the Deck.gl\n JSON specification as well as PyDeck plots inside a panel.\n\n Deck.gl is a very powerful WebGL-powered framework for visual exploratory\n data analysis of large datasets.\n\n Reference: https://panel.holoviz.org/reference/panes/DeckGL.html\n\n :Example:\n\n >>> pn.extension('deckgl')\n >>> DeckGL(\n ... some_deckgl_dict_or_pydeck_object,\n ... mapbox_api_key=MAPBOX_KEY, height=600\n ... 
)\n \"\"\"\n\n mapbox_api_key = param.String(default=None, doc=\"\"\"\n The MapBox API key if not supplied by a PyDeck object.\"\"\")\n\n tooltips = param.ClassSelector(default=True, class_=(bool, dict), doc=\"\"\"\n Whether to enable tooltips\"\"\")\n\n click_state = param.Dict(default={}, doc=\"\"\"\n Contains the last click event on the DeckGL plot.\"\"\")\n\n hover_state = param.Dict(default={}, doc=\"\"\"\n The current hover state of the DeckGL plot.\"\"\")\n\n view_state = param.Dict(default={}, doc=\"\"\"\n The current view state of the DeckGL plot.\"\"\")\n\n throttle = param.Dict(default={'view': 200, 'hover': 200}, doc=\"\"\"\n Throttling timeout (in milliseconds) for view state and hover\n events sent from the frontend.\"\"\")\n\n _rename: ClassVar[Mapping[str, str | None]] = {\n 'click_state': 'clickState', 'hover_state': 'hoverState',\n 'view_state': 'viewState', 'tooltips': 'tooltip'\n }\n\n _updates: ClassVar[bool] = True\n\n priority: ClassVar[float | bool | None] = None\n\n @classmethod\n def applies(cls, obj: Any) -> float | bool | None:\n if cls.is_pydeck(obj):\n return 0.8\n elif isinstance(obj, (dict, str)):\n return 0\n return False\n\n @classmethod\n def is_pydeck(cls, obj):\n if 'pydeck' in sys.modules:\n import pydeck\n return isinstance(obj, pydeck.bindings.deck.Deck)\n return False\n\n @classmethod\n def _process_data(cls, data):\n columns = defaultdict(list)\n for d in data:\n for col, val in d.items():\n columns[col].append(val)\n return {col: np.asarray(vals) for col, vals in columns.items()}\n\n @classmethod\n def _update_sources(cls, json_data, sources):\n layers = json_data.get('layers', [])\n\n # Create index of sources by columns\n source_columns = defaultdict(list)\n for i, source in enumerate(sources):\n key = tuple(sorted(source.data.keys()))\n source_columns[key].append((i, source))\n\n # Process\n unprocessed, unused = [], list(sources)\n for layer in layers:\n data = layer.get('data')\n if is_dataframe(data):\n data = ColumnDataSource.from_df(data)\n elif (isinstance(data, list) and data\n and isinstance(data[0], dict)):\n data = cls._process_data(data)\n else:\n continue\n\n key = tuple(sorted(data.keys()))\n existing = source_columns.get(key)\n if existing:\n index, cds = existing.pop()\n layer['data'] = index\n updates = {}\n for col, values in data.items():\n if not np.array_equal(data[col], cds.data[col]):\n updates[col] = values\n if updates:\n cds.data.update(updates)\n unused.remove(cds)\n else:\n unprocessed.append((layer, data))\n\n for layer, data in unprocessed:\n if unused:\n cds = unused.pop()\n cds.data = data\n else:\n cds = ColumnDataSource(data)\n sources.append(cds)\n layer['data'] = sources.index(cds)\n\n def _transform_object(self, obj) -> Dict[str, Any]:\n if self.object is None:\n data, mapbox_api_key, tooltip = {}, self.mapbox_api_key, self.tooltips\n elif isinstance(self.object, (str, dict)):\n if isinstance(self.object, str):\n data = json.loads(self.object)\n else:\n data = dict(self.object)\n data['layers'] = [dict(layer) for layer in data.get('layers', [])]\n mapbox_api_key = self.mapbox_api_key\n tooltip = self.tooltips\n else:\n data = dict(self.object.__dict__)\n mapbox_api_key = data.pop('mapbox_key', self.mapbox_api_key)\n deck_widget = data.pop('deck_widget', None)\n if isinstance(self.tooltips, dict) or deck_widget is None:\n tooltip = self.tooltips\n else:\n tooltip = deck_widget.tooltip\n data = {k: v for k, v in recurse_data(data).items() if v is not None}\n\n # Delete undefined width and height\n for view in 
data.get('views', []):\n if view.get('width', False) is None:\n view.pop('width')\n if view.get('height', False) is None:\n view.pop('height')\n\n return dict(data=data, tooltip=tooltip, mapbox_api_key=mapbox_api_key or \"\")\n\n def _get_model(\n self, doc: Document, root: Optional[Model] = None,\n parent: Optional[Model] = None, comm: Optional[Comm] = None\n ) -> Model:\n self._bokeh_model = DeckGLPlot = lazy_load(\n 'panel.models.deckgl', 'DeckGLPlot', isinstance(comm, JupyterComm), root\n )\n properties = self._get_properties(doc)\n data = properties.pop('data')\n properties['data_sources'] = sources = []\n self._update_sources(data, sources)\n properties['layers'] = data.pop('layers', [])\n properties['initialViewState'] = data.pop('initialViewState', {})\n model = DeckGLPlot(data=data, **properties)\n root = root or model\n self._link_props(model, ['clickState', 'hoverState', 'viewState'], doc, root, comm)\n self._models[root.ref[\"id\"]] = (model, parent)\n return model\n\n def _update(self, ref: str, model: Model) -> None:\n properties = self._get_properties(model.document)\n data = properties.pop('data')\n self._update_sources(data, model.data_sources)\n properties['data'] = data\n properties['layers'] = data.pop('layers', [])\n properties['initialViewState'] = data.pop('initialViewState', {})\n model.update(**properties)\n", "path": "panel/pane/deckgl.py"}]} | 3,346 | 691 |
gh_patches_debug_21066 | rasdani/github-patches | git_diff | googleapis__google-auth-library-python-1428 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
deprecation notice for 3.7 modifies global state (user warning filters) as import side-effect
It is impossible to filter Python37DeprecationWarning after PR https://github.com/googleapis/google-auth-library-python/pull/1371.
Custom libraries should not configure warning filters, because it is user project's global state. Most of the times you cannot modify import order and insert new warning filters after your library modifies them.
#### Environment details
- OS: Ubuntu 22.04.3 LTS linux 5.15.0-89-generic
- Python version: 3.7.17
- pip version: 23.3.1
- `google-auth` version: 2.24.0
#### Steps to reproduce
1. install google-auth into your python3.7 project
2. configure filterwarning rule `ignore::DeprecationWarning` in pytest.ini
3. use google.auth or google.oauth2 somewhere in your project
4. run pytest
5. get Python37DeprecationWarning that you cannot filter
</issue>
<code>
[start of google/oauth2/__init__.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Google OAuth 2.0 Library for Python."""
16
17 import sys
18 import warnings
19
20
21 class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER
22 """
23 Deprecation warning raised when Python 3.7 runtime is detected.
24 Python 3.7 support will be dropped after January 1, 2024. See
25 https://cloud.google.com/python/docs/python37-sunset/ for more information.
26 """
27
28 pass
29
30
31 # Checks if the current runtime is Python 3.7.
32 if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER
33 message = (
34 "After January 1, 2024, new releases of this library will drop support "
35 "for Python 3.7. More details about Python 3.7 support "
36 "can be found at https://cloud.google.com/python/docs/python37-sunset/"
37 )
38 # Configure the Python37DeprecationWarning warning so that it is only emitted once.
39 warnings.simplefilter("once", Python37DeprecationWarning)
40 warnings.warn(message, Python37DeprecationWarning)
41
[end of google/oauth2/__init__.py]
[start of google/auth/__init__.py]
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Google Auth Library for Python."""
16
17 import logging
18 import sys
19 import warnings
20
21 from google.auth import version as google_auth_version
22 from google.auth._default import (
23 default,
24 load_credentials_from_dict,
25 load_credentials_from_file,
26 )
27
28
29 __version__ = google_auth_version.__version__
30
31
32 __all__ = ["default", "load_credentials_from_file", "load_credentials_from_dict"]
33
34
35 class Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER
36 """
37 Deprecation warning raised when Python 3.7 runtime is detected.
38 Python 3.7 support will be dropped after January 1, 2024. See
39 https://cloud.google.com/python/docs/python37-sunset/ for more information.
40 """
41
42 pass
43
44
45 # Checks if the current runtime is Python 3.7.
46 if sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER
47 message = (
48 "After January 1, 2024, new releases of this library will drop support "
49 "for Python 3.7. More details about Python 3.7 support "
50 "can be found at https://cloud.google.com/python/docs/python37-sunset/"
51 )
52
53 # Configure the Python37DeprecationWarning warning so that it is only emitted once.
54 warnings.simplefilter("once", Python37DeprecationWarning)
55 warnings.warn(message, Python37DeprecationWarning)
56
57 # Set default logging handler to avoid "No handler found" warnings.
58 logging.getLogger(__name__).addHandler(logging.NullHandler())
59
[end of google/auth/__init__.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/google/auth/__init__.py b/google/auth/__init__.py
--- a/google/auth/__init__.py
+++ b/google/auth/__init__.py
@@ -49,9 +49,6 @@
"for Python 3.7. More details about Python 3.7 support "
"can be found at https://cloud.google.com/python/docs/python37-sunset/"
)
-
- # Configure the Python37DeprecationWarning warning so that it is only emitted once.
- warnings.simplefilter("once", Python37DeprecationWarning)
warnings.warn(message, Python37DeprecationWarning)
# Set default logging handler to avoid "No handler found" warnings.
diff --git a/google/oauth2/__init__.py b/google/oauth2/__init__.py
--- a/google/oauth2/__init__.py
+++ b/google/oauth2/__init__.py
@@ -35,6 +35,4 @@
"for Python 3.7. More details about Python 3.7 support "
"can be found at https://cloud.google.com/python/docs/python37-sunset/"
)
- # Configure the Python37DeprecationWarning warning so that it is only emitted once.
- warnings.simplefilter("once", Python37DeprecationWarning)
warnings.warn(message, Python37DeprecationWarning)
| {"golden_diff": "diff --git a/google/auth/__init__.py b/google/auth/__init__.py\n--- a/google/auth/__init__.py\n+++ b/google/auth/__init__.py\n@@ -49,9 +49,6 @@\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n-\n- # Configure the Python37DeprecationWarning warning so that it is only emitted once.\n- warnings.simplefilter(\"once\", Python37DeprecationWarning)\n warnings.warn(message, Python37DeprecationWarning)\n \n # Set default logging handler to avoid \"No handler found\" warnings.\ndiff --git a/google/oauth2/__init__.py b/google/oauth2/__init__.py\n--- a/google/oauth2/__init__.py\n+++ b/google/oauth2/__init__.py\n@@ -35,6 +35,4 @@\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n- # Configure the Python37DeprecationWarning warning so that it is only emitted once.\n- warnings.simplefilter(\"once\", Python37DeprecationWarning)\n warnings.warn(message, Python37DeprecationWarning)\n", "issue": "deprecation notice for 3.7 modifies global state (user warning filters) as import side-effect\nIt is impossible to filter Python37DeprecationWarning after PR https://github.com/googleapis/google-auth-library-python/pull/1371.\r\n\r\nCustom libraries should not configure warning filters, because it is user project's global state. Most of the times you cannot modify import order and insert new warning filters after your library modifies them.\r\n\r\n#### Environment details\r\n\r\n - OS: Ubuntu 22.04.3 LTS linux 5.15.0-89-generic\r\n - Python version: 3.7.17\r\n - pip version: 23.3.1\r\n - `google-auth` version: 2.24.0\r\n\r\n#### Steps to reproduce\r\n\r\n 1. install google-auth into your python3.7 project\r\n 2. configure filterwarning rule `ignore::DeprecationWarning` in pytest.ini\r\n 3. use google.auth or google.oauth2 somewhere in your project\r\n 4. run pytest\r\n 5. get Python37DeprecationWarning that you cannot filter \n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google OAuth 2.0 Library for Python.\"\"\"\n\nimport sys\nimport warnings\n\n\nclass Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n Python 3.7 support will be dropped after January 1, 2024. See\n https://cloud.google.com/python/docs/python37-sunset/ for more information.\n \"\"\"\n\n pass\n\n\n# Checks if the current runtime is Python 3.7.\nif sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n \"for Python 3.7. 
More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n # Configure the Python37DeprecationWarning warning so that it is only emitted once.\n warnings.simplefilter(\"once\", Python37DeprecationWarning)\n warnings.warn(message, Python37DeprecationWarning)\n", "path": "google/oauth2/__init__.py"}, {"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google Auth Library for Python.\"\"\"\n\nimport logging\nimport sys\nimport warnings\n\nfrom google.auth import version as google_auth_version\nfrom google.auth._default import (\n default,\n load_credentials_from_dict,\n load_credentials_from_file,\n)\n\n\n__version__ = google_auth_version.__version__\n\n\n__all__ = [\"default\", \"load_credentials_from_file\", \"load_credentials_from_dict\"]\n\n\nclass Python37DeprecationWarning(DeprecationWarning): # pragma: NO COVER\n \"\"\"\n Deprecation warning raised when Python 3.7 runtime is detected.\n Python 3.7 support will be dropped after January 1, 2024. See\n https://cloud.google.com/python/docs/python37-sunset/ for more information.\n \"\"\"\n\n pass\n\n\n# Checks if the current runtime is Python 3.7.\nif sys.version_info.major == 3 and sys.version_info.minor == 7: # pragma: NO COVER\n message = (\n \"After January 1, 2024, new releases of this library will drop support \"\n \"for Python 3.7. More details about Python 3.7 support \"\n \"can be found at https://cloud.google.com/python/docs/python37-sunset/\"\n )\n\n # Configure the Python37DeprecationWarning warning so that it is only emitted once.\n warnings.simplefilter(\"once\", Python37DeprecationWarning)\n warnings.warn(message, Python37DeprecationWarning)\n\n# Set default logging handler to avoid \"No handler found\" warnings.\nlogging.getLogger(__name__).addHandler(logging.NullHandler())\n", "path": "google/auth/__init__.py"}]} | 1,858 | 296 |
gh_patches_debug_8968 | rasdani/github-patches | git_diff | pyca__cryptography-3335 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
Add Python 3.6 support
</issue>
<code>
[start of setup.py]
1 #!/usr/bin/env python
2
3 # This file is dual licensed under the terms of the Apache License, Version
4 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
5 # for complete details.
6
7 from __future__ import absolute_import, division, print_function
8
9 import os
10 import platform
11 import subprocess
12 import sys
13 from distutils.command.build import build
14
15 import pkg_resources
16
17 from setuptools import find_packages, setup
18 from setuptools.command.install import install
19 from setuptools.command.test import test
20
21
22 base_dir = os.path.dirname(__file__)
23 src_dir = os.path.join(base_dir, "src")
24
25 # When executing the setup.py, we need to be able to import ourselves, this
26 # means that we need to add the src/ directory to the sys.path.
27 sys.path.insert(0, src_dir)
28
29 about = {}
30 with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f:
31 exec(f.read(), about)
32
33
34 VECTORS_DEPENDENCY = "cryptography_vectors=={0}".format(about['__version__'])
35
36 requirements = [
37 "idna>=2.0",
38 "pyasn1>=0.1.8",
39 "six>=1.4.1",
40 "setuptools>=11.3",
41 ]
42 setup_requirements = []
43
44 if sys.version_info < (3, 4):
45 requirements.append("enum34")
46
47 if sys.version_info < (3, 3):
48 requirements.append("ipaddress")
49
50 if platform.python_implementation() == "PyPy":
51 if sys.pypy_version_info < (2, 6):
52 raise RuntimeError(
53 "cryptography 1.0 is not compatible with PyPy < 2.6. Please "
54 "upgrade PyPy to use this library."
55 )
56 else:
57 requirements.append("cffi>=1.4.1")
58 setup_requirements.append("cffi>=1.4.1")
59
60 test_requirements = [
61 "pytest>=2.9.0",
62 "pretend",
63 "iso8601",
64 "pyasn1_modules",
65 "pytz",
66 ]
67 if sys.version_info[:2] > (2, 6):
68 test_requirements.append("hypothesis>=1.11.4")
69
70
71 # If there's no vectors locally that probably means we are in a tarball and
72 # need to go and get the matching vectors package from PyPi
73 if not os.path.exists(os.path.join(base_dir, "vectors/setup.py")):
74 test_requirements.append(VECTORS_DEPENDENCY)
75
76
77 def cc_is_available():
78 return sys.platform == "darwin" and list(map(
79 int, platform.mac_ver()[0].split("."))) >= [10, 8, 0]
80
81
82 backends = [
83 "openssl = cryptography.hazmat.backends.openssl:backend"
84 ]
85
86 if cc_is_available():
87 backends.append(
88 "commoncrypto = cryptography.hazmat.backends.commoncrypto:backend",
89 )
90
91
92 class PyTest(test):
93 def finalize_options(self):
94 test.finalize_options(self)
95 self.test_args = []
96 self.test_suite = True
97
98 # This means there's a vectors/ folder with the package in here.
99 # cd into it, install the vectors package and then refresh sys.path
100 if VECTORS_DEPENDENCY not in test_requirements:
101 subprocess.check_call(
102 [sys.executable, "setup.py", "install"], cwd="vectors"
103 )
104 pkg_resources.get_distribution("cryptography_vectors").activate()
105
106 def run_tests(self):
107 # Import here because in module scope the eggs are not loaded.
108 import pytest
109 test_args = [os.path.join(base_dir, "tests")]
110 errno = pytest.main(test_args)
111 sys.exit(errno)
112
113
114 def keywords_with_side_effects(argv):
115 """
116 Get a dictionary with setup keywords that (can) have side effects.
117
118 :param argv: A list of strings with command line arguments.
119 :returns: A dictionary with keyword arguments for the ``setup()`` function.
120
121 This setup.py script uses the setuptools 'setup_requires' feature because
122 this is required by the cffi package to compile extension modules. The
123 purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi
124 build process as a result of setup.py invocations that don't need the cffi
125 module to be built (setup.py serves the dual purpose of exposing package
126 metadata).
127
128 All of the options listed by ``python setup.py --help`` that print
129 information should be recognized here. The commands ``clean``,
130 ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.
131 Any combination of these options and commands is also supported.
132
133 This function was originally based on the `setup.py script`_ of SciPy (see
134 also the discussion in `pip issue #25`_).
135
136 .. _pip issue #25: https://github.com/pypa/pip/issues/25
137 .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py
138 """
139 no_setup_requires_arguments = (
140 '-h', '--help',
141 '-n', '--dry-run',
142 '-q', '--quiet',
143 '-v', '--verbose',
144 '-V', '--version',
145 '--author',
146 '--author-email',
147 '--classifiers',
148 '--contact',
149 '--contact-email',
150 '--description',
151 '--egg-base',
152 '--fullname',
153 '--help-commands',
154 '--keywords',
155 '--licence',
156 '--license',
157 '--long-description',
158 '--maintainer',
159 '--maintainer-email',
160 '--name',
161 '--no-user-cfg',
162 '--obsoletes',
163 '--platforms',
164 '--provides',
165 '--requires',
166 '--url',
167 'clean',
168 'egg_info',
169 'register',
170 'sdist',
171 'upload',
172 )
173
174 def is_short_option(argument):
175 """Check whether a command line argument is a short option."""
176 return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'
177
178 def expand_short_options(argument):
179 """Expand combined short options into canonical short options."""
180 return ('-' + char for char in argument[1:])
181
182 def argument_without_setup_requirements(argv, i):
183 """Check whether a command line argument needs setup requirements."""
184 if argv[i] in no_setup_requires_arguments:
185 # Simple case: An argument which is either an option or a command
186 # which doesn't need setup requirements.
187 return True
188 elif (is_short_option(argv[i]) and
189 all(option in no_setup_requires_arguments
190 for option in expand_short_options(argv[i]))):
191 # Not so simple case: Combined short options none of which need
192 # setup requirements.
193 return True
194 elif argv[i - 1:i] == ['--egg-base']:
195 # Tricky case: --egg-info takes an argument which should not make
196 # us use setup_requires (defeating the purpose of this code).
197 return True
198 else:
199 return False
200
201 if all(argument_without_setup_requirements(argv, i)
202 for i in range(1, len(argv))):
203 return {
204 "cmdclass": {
205 "build": DummyBuild,
206 "install": DummyInstall,
207 "test": DummyPyTest,
208 }
209 }
210 else:
211 cffi_modules = [
212 "src/_cffi_src/build_openssl.py:ffi",
213 "src/_cffi_src/build_constant_time.py:ffi",
214 "src/_cffi_src/build_padding.py:ffi",
215 ]
216 if cc_is_available():
217 cffi_modules.append("src/_cffi_src/build_commoncrypto.py:ffi")
218
219 return {
220 "setup_requires": setup_requirements,
221 "cmdclass": {
222 "test": PyTest,
223 },
224 "cffi_modules": cffi_modules
225 }
226
227
228 setup_requires_error = ("Requested setup command that needs 'setup_requires' "
229 "while command line arguments implied a side effect "
230 "free command or option.")
231
232
233 class DummyBuild(build):
234 """
235 This class makes it very obvious when ``keywords_with_side_effects()`` has
236 incorrectly interpreted the command line arguments to ``setup.py build`` as
237 one of the 'side effect free' commands or options.
238 """
239
240 def run(self):
241 raise RuntimeError(setup_requires_error)
242
243
244 class DummyInstall(install):
245 """
246 This class makes it very obvious when ``keywords_with_side_effects()`` has
247 incorrectly interpreted the command line arguments to ``setup.py install``
248 as one of the 'side effect free' commands or options.
249 """
250
251 def run(self):
252 raise RuntimeError(setup_requires_error)
253
254
255 class DummyPyTest(test):
256 """
257 This class makes it very obvious when ``keywords_with_side_effects()`` has
258 incorrectly interpreted the command line arguments to ``setup.py test`` as
259 one of the 'side effect free' commands or options.
260 """
261
262 def run_tests(self):
263 raise RuntimeError(setup_requires_error)
264
265
266 with open(os.path.join(base_dir, "README.rst")) as f:
267 long_description = f.read()
268
269
270 setup(
271 name=about["__title__"],
272 version=about["__version__"],
273
274 description=about["__summary__"],
275 long_description=long_description,
276 license=about["__license__"],
277 url=about["__uri__"],
278
279 author=about["__author__"],
280 author_email=about["__email__"],
281
282 classifiers=[
283 "Intended Audience :: Developers",
284 "License :: OSI Approved :: Apache Software License",
285 "License :: OSI Approved :: BSD License",
286 "Natural Language :: English",
287 "Operating System :: MacOS :: MacOS X",
288 "Operating System :: POSIX",
289 "Operating System :: POSIX :: BSD",
290 "Operating System :: POSIX :: Linux",
291 "Operating System :: Microsoft :: Windows",
292 "Programming Language :: Python",
293 "Programming Language :: Python :: 2",
294 "Programming Language :: Python :: 2.6",
295 "Programming Language :: Python :: 2.7",
296 "Programming Language :: Python :: 3",
297 "Programming Language :: Python :: 3.3",
298 "Programming Language :: Python :: 3.4",
299 "Programming Language :: Python :: 3.5",
300 "Programming Language :: Python :: Implementation :: CPython",
301 "Programming Language :: Python :: Implementation :: PyPy",
302 "Topic :: Security :: Cryptography",
303 ],
304
305 package_dir={"": "src"},
306 packages=find_packages(where="src", exclude=["_cffi_src", "_cffi_src.*"]),
307 include_package_data=True,
308
309 install_requires=requirements,
310 tests_require=test_requirements,
311 extras_require={
312 "test": test_requirements,
313 "docstest": [
314 "doc8",
315 "pyenchant",
316 "readme_renderer >= 16.0",
317 "sphinx",
318 "sphinx_rtd_theme",
319 "sphinxcontrib-spelling",
320 ],
321 "pep8test": [
322 "flake8",
323 "flake8-import-order",
324 "pep8-naming",
325 ],
326 },
327
328 # for cffi
329 zip_safe=False,
330 ext_package="cryptography.hazmat.bindings",
331 entry_points={
332 "cryptography.backends": backends,
333 },
334 **keywords_with_side_effects(sys.argv)
335 )
336
[end of setup.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -297,6 +297,7 @@
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Security :: Cryptography",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -297,6 +297,7 @@\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n+ \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n", "issue": "Add Python 3.6 support\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools>=11.3\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. 
Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.4.1\")\n setup_requirements.append(\"cffi>=1.4.1\")\n\ntest_requirements = [\n \"pytest>=2.9.0\",\n \"pretend\",\n \"iso8601\",\n \"pyasn1_modules\",\n \"pytz\",\n]\nif sys.version_info[:2] > (2, 6):\n test_requirements.append(\"hypothesis>=1.11.4\")\n\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n 
long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\"]),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n extras_require={\n \"test\": test_requirements,\n \"docstest\": [\n \"doc8\",\n \"pyenchant\",\n \"readme_renderer >= 16.0\",\n \"sphinx\",\n \"sphinx_rtd_theme\",\n \"sphinxcontrib-spelling\",\n ],\n \"pep8test\": [\n \"flake8\",\n \"flake8-import-order\",\n \"pep8-naming\",\n ],\n },\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}]} | 3,883 | 116 |
gh_patches_debug_26498 | rasdani/github-patches | git_diff | privacyidea__privacyidea-3836 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
OTP+PIN does not work offline
<!--
Thank you for filing a bug within privacyIDEA. The more information you provide, the easier it is to identify the problem!
Issues are not meant for asking questions! You're welcome to ask questions in our community forum: https://community.privacyidea.org
privacyIDEA is licensed under the AGPL and comes without warranty. If you require reliable, professional help, please consider getting either
- consultancy
(https://netknights.it/en/leistungen/one-time-services/)
- or the privacyIDEA Enterprise Edition
(https://netknights.it/en/leistungen/service-level-agreements/)
-->
### Top-level intent
Use offline authentication with an OTP PIN that is entered *behind* the OTP value: OTP+PIN.
### Expected outcome
Be able to authenticate offline with OTP+PIN.
### Actual outcome
The online authentication works, but all offline authentications fail.
### Solution
The offline data for HOTP is created by concatenating PIN+OTP in a fixed manner here:
https://github.com/privacyidea/privacyidea/blob/1eb6c8ff6b283ae75d62e5d2c2635e55276d5515/privacyidea/lib/applications/offline.py#L86
This means the user will try to authenticate with OTP+PIN and will always fail.
We will have to get the config value of "prepend PIN" and use it to either concatenate PIN+OTP or OTP+PIN.
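A minimal sketch of the intended hashing logic (illustrative only — `prepend_pin` here stands for whatever the "prepend PIN" config value resolves to, not the actual privacyIDEA API):

```python
from passlib.hash import pbkdf2_sha512

def hash_offline_password(otppin, otp, prepend_pin, rounds=6549):
    # Build the password exactly as the user will type it offline:
    # PIN before the OTP if "prepend PIN" is set, otherwise OTP before the PIN.
    otppw = otppin + otp if prepend_pin else otp + otppin
    return pbkdf2_sha512.using(rounds=rounds, salt_size=10).hash(otppw)
```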
</issue>
<code>
[start of privacyidea/lib/applications/offline.py]
1 # -*- coding: utf-8 -*-
2 #
3 # 2015-04-08 Cornelius Kölbel <[email protected]>
4 # Add options ROUNDS to avoid timeouts during OTP hash calculation
5 # 2015-04-03 Cornelius Kölbel <[email protected]>
6 # Use pbkdf2 for OTP hashing
7 # 2015-03-13 Cornelius Kölbel, <[email protected]>
8 # initial writeup
9 #
10 # License: AGPLv3
11 # contact: http://www.privacyidea.org
12 #
13 # This code is free software; you can redistribute it and/or
14 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
15 # License as published by the Free Software Foundation; either
16 # version 3 of the License, or any later version.
17 #
18 # This code is distributed in the hope that it will be useful,
19 # but WITHOUT ANY WARRANTY; without even the implied warranty of
20 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 # GNU AFFERO GENERAL PUBLIC LICENSE for more details.
22 #
23 # You should have received a copy of the GNU Affero General Public
24 # License along with this program. If not, see <http://www.gnu.org/licenses/>.
25 #
26 from privacyidea.lib.applications import MachineApplicationBase
27 from privacyidea.lib.crypto import geturandom
28 from privacyidea.lib.error import ValidateError, ParameterError
29 import logging
30 from passlib.hash import pbkdf2_sha512
31 from privacyidea.lib.token import get_tokens
32 from privacyidea.lib.policy import TYPE
33 log = logging.getLogger(__name__)
34 ROUNDS = 6549
35 REFILLTOKEN_LENGTH = 40
36
37
38 class MachineApplication(MachineApplicationBase):
39 """
40 This is the application for Offline authentication with PAM or
41 the privacyIDEA credential provider.
42
43 The machine application returns a list of salted OTP hashes to be used with
44 offline authentication. The token then is disabled, so that it can not
45 be used for online authentication anymore, to avoid reusing a fished OTP
46 value.
47
48 The server stores the information, which OTP values were issued.
49
50 options options:
51 * user: a username.
52 * count: is the number of OTP values returned
53
54 """
55 application_name = "offline"
56
57 @staticmethod
58 def generate_new_refilltoken(token_obj):
59 """
60 Generate new refill token and store it in the tokeninfo of the token.
61 :param token_obj: token in question
62 :return: a string
63 """
64 new_refilltoken = geturandom(REFILLTOKEN_LENGTH, hex=True)
65 token_obj.add_tokeninfo("refilltoken", new_refilltoken)
66 return new_refilltoken
67
68 @staticmethod
69 def get_offline_otps(token_obj, otppin, amount, rounds=ROUNDS):
70 """
71 Retrieve the desired number of passwords (= PIN + OTP), hash them
72 and return them in a dictionary. Increase the token counter.
73 :param token_obj: token in question
74 :param otppin: The OTP PIN to prepend in the passwords. The PIN is not validated!
75 :param amount: Number of OTP values (non-negative!)
76 :param rounds: Number of PBKDF2 rounds
77 :return: dictionary
78 """
79 if amount < 0:
80 raise ParameterError("Invalid refill amount: {!r}".format(amount))
81 (res, err, otp_dict) = token_obj.get_multi_otp(count=amount, counter_index=True)
82 otps = otp_dict.get("otp")
83 for key in otps.keys():
84 # Return the hash of OTP PIN and OTP values
85 otps[key] = pbkdf2_sha512.using(
86 rounds=rounds, salt_size=10).hash(otppin + otps.get(key))
87 # We do not disable the token, so if all offline OTP values
88 # are used, the token can be used the authenticate online again.
89 # token_obj.enable(False)
90 # increase the counter by the consumed values and
91 # also store it in tokeninfo.
92 token_obj.inc_otp_counter(increment=amount)
93
94 return otps
95
96 @staticmethod
97 def get_refill(token_obj, password, options=None):
98 """
99 Returns new authentication OTPs to refill the client
100
101 To do so we also verify the password, which may consist of PIN + OTP.
102
103 :param token_obj: Token object
104 :param password: PIN + OTP
105 :param options: dict that might contain "count" and "rounds"
106 :return: a dictionary of auth items
107 """
108 options = options or {}
109 count = int(options.get("count", 100))
110 rounds = int(options.get("rounds", ROUNDS))
111 _r, otppin, otpval = token_obj.split_pin_pass(password)
112 if not _r:
113 raise ParameterError("Could not split password")
114 current_token_counter = token_obj.token.count
115 first_offline_counter = current_token_counter - count
116 if first_offline_counter < 0:
117 first_offline_counter = 0
118 # find the value in the offline OTP values! This resets the token.count!
119 matching_count = token_obj.check_otp(otpval, first_offline_counter, count)
120 token_obj.set_otp_count(current_token_counter)
121 # Raise an exception *after* we reset the token counter
122 if matching_count < 0:
123 raise ValidateError("You provided a wrong OTP value.")
124 # We have to add 1 here: Assume *first_offline_counter* is the counter value of the first offline OTP
125 # we sent to the client. Assume the client then requests a refill with that exact OTP value.
126 # Then, we need to respond with a refill of one OTP value, as the client has consumed one OTP value.
127 counter_diff = matching_count - first_offline_counter + 1
128 otps = MachineApplication.get_offline_otps(token_obj, otppin, counter_diff, rounds)
129 token_obj.add_tokeninfo(key="offline_counter",
130 value=count)
131 return otps
132
133 @staticmethod
134 def get_authentication_item(token_type,
135 serial,
136 challenge=None, options=None,
137 filter_param=None):
138 """
139 :param token_type: the type of the token. At the moment
140 we only support "HOTP" token. Supporting time
141 based tokens is difficult, since we would have to
142 return a looooong list of OTP values.
143 Supporting "yubikey" token (AES) would be
144 possible, too.
145 :param serial: the serial number of the token.
146 :param challenge: This can contain the password (otp pin + otp
147 value) so that we can put the OTP PIN into the hashed response.
148 :type challenge: basestring
149 :return auth_item: A list of hashed OTP values
150 """
151 ret = {}
152 options = options or {}
153 password = challenge
154 if token_type.lower() == "hotp":
155 tokens = get_tokens(serial=serial)
156 if len(tokens) == 1:
157 token_obj = tokens[0]
158 if password:
159 _r, otppin, _ = token_obj.split_pin_pass(password)
160 if not _r:
161 raise ParameterError("Could not split password")
162 else:
163 otppin = ""
164 otps = MachineApplication.get_offline_otps(token_obj,
165 otppin,
166 int(options.get("count", 100)),
167 int(options.get("rounds", ROUNDS)))
168 refilltoken = MachineApplication.generate_new_refilltoken(token_obj)
169 ret["response"] = otps
170 ret["refilltoken"] = refilltoken
171 user_object = token_obj.user
172 if user_object:
173 uInfo = user_object.info
174 if "username" in uInfo:
175 ret["user"] = ret["username"] = uInfo.get("username")
176
177 else:
178 log.info("Token %r, type %r is not supported by "
179 "OFFLINE application module" % (serial, token_type))
180
181 return ret
182
183 @staticmethod
184 def get_options():
185 """
186 returns a dictionary with a list of required and optional options
187 """
188 return {'count': {'type': TYPE.STRING},
189 'rounds': {'type': TYPE.STRING}}
190
[end of privacyidea/lib/applications/offline.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/privacyidea/lib/applications/offline.py b/privacyidea/lib/applications/offline.py
--- a/privacyidea/lib/applications/offline.py
+++ b/privacyidea/lib/applications/offline.py
@@ -29,6 +29,7 @@
import logging
from passlib.hash import pbkdf2_sha512
from privacyidea.lib.token import get_tokens
+from privacyidea.lib.config import get_prepend_pin
from privacyidea.lib.policy import TYPE
log = logging.getLogger(__name__)
ROUNDS = 6549
@@ -80,10 +81,12 @@
raise ParameterError("Invalid refill amount: {!r}".format(amount))
(res, err, otp_dict) = token_obj.get_multi_otp(count=amount, counter_index=True)
otps = otp_dict.get("otp")
- for key in otps.keys():
+ prepend_pin = get_prepend_pin()
+ for key, otp in otps.items():
# Return the hash of OTP PIN and OTP values
+ otppw = otppin + otp if prepend_pin else otp + otppin
otps[key] = pbkdf2_sha512.using(
- rounds=rounds, salt_size=10).hash(otppin + otps.get(key))
+ rounds=rounds, salt_size=10).hash(otppw)
# We do not disable the token, so if all offline OTP values
# are used, the token can be used the authenticate online again.
# token_obj.enable(False)
| {"golden_diff": "diff --git a/privacyidea/lib/applications/offline.py b/privacyidea/lib/applications/offline.py\n--- a/privacyidea/lib/applications/offline.py\n+++ b/privacyidea/lib/applications/offline.py\n@@ -29,6 +29,7 @@\n import logging\n from passlib.hash import pbkdf2_sha512\n from privacyidea.lib.token import get_tokens\n+from privacyidea.lib.config import get_prepend_pin\n from privacyidea.lib.policy import TYPE\n log = logging.getLogger(__name__)\n ROUNDS = 6549\n@@ -80,10 +81,12 @@\n raise ParameterError(\"Invalid refill amount: {!r}\".format(amount))\n (res, err, otp_dict) = token_obj.get_multi_otp(count=amount, counter_index=True)\n otps = otp_dict.get(\"otp\")\n- for key in otps.keys():\n+ prepend_pin = get_prepend_pin()\n+ for key, otp in otps.items():\n # Return the hash of OTP PIN and OTP values\n+ otppw = otppin + otp if prepend_pin else otp + otppin\n otps[key] = pbkdf2_sha512.using(\n- rounds=rounds, salt_size=10).hash(otppin + otps.get(key))\n+ rounds=rounds, salt_size=10).hash(otppw)\n # We do not disable the token, so if all offline OTP values\n # are used, the token can be used the authenticate online again.\n # token_obj.enable(False)\n", "issue": "OTP+PIN does not work offline\n<!--\r\nThank you for filing a bug within privacyIDEA. The more information you provide, the easier it is to identify the problem!\r\n\r\nIssues are not meant for asking questions! You're welcome to ask questions in our community forum: https://community.privacyidea.org\r\n\r\nprivacyIDEA is licensed under the AGPL and comes without warranty. If you require reliable, professional help, please consider getting either\r\n- consultancy \r\n (https://netknights.it/en/leistungen/one-time-services/)\r\n- or the privacyIDEA Enterprise Edition \r\n (https://netknights.it/en/leistungen/service-level-agreements/)\r\n-->\r\n\r\n### Top-level intent\r\n\r\nDo offline with an OTP pin that is used *behind* the OTP value: OTP+PIN.\r\n\r\n### Expected outcome\r\n\r\nBe able to authenticate offline with OTP+PIN, \r\n\r\n### Actual outcome\r\n\r\nThe online authentication works out, all offline authentications fail.\r\n\r\n### Solution\r\n\r\nThe offline data for HOTP is created by in a fixed manner concatenate PIN+OTP here:\r\n\r\nhttps://github.com/privacyidea/privacyidea/blob/1eb6c8ff6b283ae75d62e5d2c2635e55276d5515/privacyidea/lib/applications/offline.py#L86\r\n\r\nThis means the user will try to authenticate with OTP+PIN and will always fail.\r\nWe will have to get the config value of \"prepend PIN\" and use it to either concatenate PIN+OTP or OTP+PIN.\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2015-04-08 Cornelius K\u00f6lbel <[email protected]>\n# Add options ROUNDS to avoid timeouts during OTP hash calculation\n# 2015-04-03 Cornelius K\u00f6lbel <[email protected]>\n# Use pbkdf2 for OTP hashing\n# 2015-03-13 Cornelius K\u00f6lbel, <[email protected]>\n# initial writeup\n#\n# License: AGPLv3\n# contact: http://www.privacyidea.org\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\nfrom privacyidea.lib.applications import MachineApplicationBase\nfrom privacyidea.lib.crypto import geturandom\nfrom privacyidea.lib.error import ValidateError, ParameterError\nimport logging\nfrom passlib.hash import pbkdf2_sha512\nfrom privacyidea.lib.token import get_tokens\nfrom privacyidea.lib.policy import TYPE\nlog = logging.getLogger(__name__)\nROUNDS = 6549\nREFILLTOKEN_LENGTH = 40\n\n\nclass MachineApplication(MachineApplicationBase):\n \"\"\"\n This is the application for Offline authentication with PAM or\n the privacyIDEA credential provider.\n\n The machine application returns a list of salted OTP hashes to be used with\n offline authentication. The token then is disabled, so that it can not\n be used for online authentication anymore, to avoid reusing a fished OTP\n value.\n\n The server stores the information, which OTP values were issued.\n\n options options:\n * user: a username.\n * count: is the number of OTP values returned\n\n \"\"\"\n application_name = \"offline\"\n\n @staticmethod\n def generate_new_refilltoken(token_obj):\n \"\"\"\n Generate new refill token and store it in the tokeninfo of the token.\n :param token_obj: token in question\n :return: a string\n \"\"\"\n new_refilltoken = geturandom(REFILLTOKEN_LENGTH, hex=True)\n token_obj.add_tokeninfo(\"refilltoken\", new_refilltoken)\n return new_refilltoken\n\n @staticmethod\n def get_offline_otps(token_obj, otppin, amount, rounds=ROUNDS):\n \"\"\"\n Retrieve the desired number of passwords (= PIN + OTP), hash them\n and return them in a dictionary. Increase the token counter.\n :param token_obj: token in question\n :param otppin: The OTP PIN to prepend in the passwords. 
The PIN is not validated!\n :param amount: Number of OTP values (non-negative!)\n :param rounds: Number of PBKDF2 rounds\n :return: dictionary\n \"\"\"\n if amount < 0:\n raise ParameterError(\"Invalid refill amount: {!r}\".format(amount))\n (res, err, otp_dict) = token_obj.get_multi_otp(count=amount, counter_index=True)\n otps = otp_dict.get(\"otp\")\n for key in otps.keys():\n # Return the hash of OTP PIN and OTP values\n otps[key] = pbkdf2_sha512.using(\n rounds=rounds, salt_size=10).hash(otppin + otps.get(key))\n # We do not disable the token, so if all offline OTP values\n # are used, the token can be used the authenticate online again.\n # token_obj.enable(False)\n # increase the counter by the consumed values and\n # also store it in tokeninfo.\n token_obj.inc_otp_counter(increment=amount)\n\n return otps\n\n @staticmethod\n def get_refill(token_obj, password, options=None):\n \"\"\"\n Returns new authentication OTPs to refill the client\n\n To do so we also verify the password, which may consist of PIN + OTP.\n\n :param token_obj: Token object\n :param password: PIN + OTP\n :param options: dict that might contain \"count\" and \"rounds\"\n :return: a dictionary of auth items\n \"\"\"\n options = options or {}\n count = int(options.get(\"count\", 100))\n rounds = int(options.get(\"rounds\", ROUNDS))\n _r, otppin, otpval = token_obj.split_pin_pass(password)\n if not _r:\n raise ParameterError(\"Could not split password\")\n current_token_counter = token_obj.token.count\n first_offline_counter = current_token_counter - count\n if first_offline_counter < 0:\n first_offline_counter = 0\n # find the value in the offline OTP values! This resets the token.count!\n matching_count = token_obj.check_otp(otpval, first_offline_counter, count)\n token_obj.set_otp_count(current_token_counter)\n # Raise an exception *after* we reset the token counter\n if matching_count < 0:\n raise ValidateError(\"You provided a wrong OTP value.\")\n # We have to add 1 here: Assume *first_offline_counter* is the counter value of the first offline OTP\n # we sent to the client. Assume the client then requests a refill with that exact OTP value.\n # Then, we need to respond with a refill of one OTP value, as the client has consumed one OTP value.\n counter_diff = matching_count - first_offline_counter + 1\n otps = MachineApplication.get_offline_otps(token_obj, otppin, counter_diff, rounds)\n token_obj.add_tokeninfo(key=\"offline_counter\",\n value=count)\n return otps\n\n @staticmethod\n def get_authentication_item(token_type,\n serial,\n challenge=None, options=None,\n filter_param=None):\n \"\"\"\n :param token_type: the type of the token. At the moment\n we only support \"HOTP\" token. 
Supporting time\n based tokens is difficult, since we would have to\n return a looooong list of OTP values.\n Supporting \"yubikey\" token (AES) would be\n possible, too.\n :param serial: the serial number of the token.\n :param challenge: This can contain the password (otp pin + otp\n value) so that we can put the OTP PIN into the hashed response.\n :type challenge: basestring\n :return auth_item: A list of hashed OTP values\n \"\"\"\n ret = {}\n options = options or {}\n password = challenge\n if token_type.lower() == \"hotp\":\n tokens = get_tokens(serial=serial)\n if len(tokens) == 1:\n token_obj = tokens[0]\n if password:\n _r, otppin, _ = token_obj.split_pin_pass(password)\n if not _r:\n raise ParameterError(\"Could not split password\")\n else:\n otppin = \"\"\n otps = MachineApplication.get_offline_otps(token_obj,\n otppin,\n int(options.get(\"count\", 100)),\n int(options.get(\"rounds\", ROUNDS)))\n refilltoken = MachineApplication.generate_new_refilltoken(token_obj)\n ret[\"response\"] = otps\n ret[\"refilltoken\"] = refilltoken\n user_object = token_obj.user\n if user_object:\n uInfo = user_object.info\n if \"username\" in uInfo:\n ret[\"user\"] = ret[\"username\"] = uInfo.get(\"username\")\n\n else:\n log.info(\"Token %r, type %r is not supported by \"\n \"OFFLINE application module\" % (serial, token_type))\n\n return ret\n\n @staticmethod\n def get_options():\n \"\"\"\n returns a dictionary with a list of required and optional options\n \"\"\"\n return {'count': {'type': TYPE.STRING},\n 'rounds': {'type': TYPE.STRING}}\n", "path": "privacyidea/lib/applications/offline.py"}]} | 3,165 | 346 |
gh_patches_debug_7882 | rasdani/github-patches | git_diff | numpy__numpy-15189 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
TST: Add the first test using hypothesis
This pull request adds the first test that uses hypothesis and hence brings in hypothesis as an additional test dependency.
@mattip Could you take a look at this please?
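
For context, a property-based test with hypothesis generally looks something like the sketch below (a hypothetical example, not the actual test added in this pull request):

```python
import numpy as np
from hypothesis import given, strategies as st

@given(st.lists(st.integers(min_value=-10**6, max_value=10**6), min_size=1))
def test_sum_matches_python_sum(values):
    # Property: np.sum over a 1-D int64 array agrees with Python's built-in sum.
    assert np.sum(np.array(values, dtype=np.int64)) == sum(values)
```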
</issue>
<code>
[start of numpy/conftest.py]
1 """
2 Pytest configuration and fixtures for the Numpy test suite.
3 """
4 import os
5
6 import pytest
7 import numpy
8
9 from numpy.core._multiarray_tests import get_fpu_mode
10
11
12 _old_fpu_mode = None
13 _collect_results = {}
14
15
16 def pytest_configure(config):
17 config.addinivalue_line("markers",
18 "valgrind_error: Tests that are known to error under valgrind.")
19 config.addinivalue_line("markers",
20 "leaks_references: Tests that are known to leak references.")
21 config.addinivalue_line("markers",
22 "slow: Tests that are very slow.")
23
24
25 def pytest_addoption(parser):
26 parser.addoption("--available-memory", action="store", default=None,
27 help=("Set amount of memory available for running the "
28 "test suite. This can result to tests requiring "
29 "especially large amounts of memory to be skipped. "
30 "Equivalent to setting environment variable "
31 "NPY_AVAILABLE_MEM. Default: determined"
32 "automatically."))
33
34
35 def pytest_sessionstart(session):
36 available_mem = session.config.getoption('available_memory')
37 if available_mem is not None:
38 os.environ['NPY_AVAILABLE_MEM'] = available_mem
39
40
41 #FIXME when yield tests are gone.
42 @pytest.hookimpl()
43 def pytest_itemcollected(item):
44 """
45 Check FPU precision mode was not changed during test collection.
46
47 The clumsy way we do it here is mainly necessary because numpy
48 still uses yield tests, which can execute code at test collection
49 time.
50 """
51 global _old_fpu_mode
52
53 mode = get_fpu_mode()
54
55 if _old_fpu_mode is None:
56 _old_fpu_mode = mode
57 elif mode != _old_fpu_mode:
58 _collect_results[item] = (_old_fpu_mode, mode)
59 _old_fpu_mode = mode
60
61
62 @pytest.fixture(scope="function", autouse=True)
63 def check_fpu_mode(request):
64 """
65 Check FPU precision mode was not changed during the test.
66 """
67 old_mode = get_fpu_mode()
68 yield
69 new_mode = get_fpu_mode()
70
71 if old_mode != new_mode:
72 raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
73 " during the test".format(old_mode, new_mode))
74
75 collect_result = _collect_results.get(request.node)
76 if collect_result is not None:
77 old_mode, new_mode = collect_result
78 raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
79 " when collecting the test".format(old_mode,
80 new_mode))
81
82
83 @pytest.fixture(autouse=True)
84 def add_np(doctest_namespace):
85 doctest_namespace['np'] = numpy
86
[end of numpy/conftest.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/numpy/conftest.py b/numpy/conftest.py
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -3,6 +3,7 @@
"""
import os
+import hypothesis
import pytest
import numpy
@@ -12,6 +13,12 @@
_old_fpu_mode = None
_collect_results = {}
+# See https://hypothesis.readthedocs.io/en/latest/settings.html
+hypothesis.settings.register_profile(
+ name="numpy-profile", deadline=None, print_blob=True,
+)
+hypothesis.settings.load_profile("numpy-profile")
+
def pytest_configure(config):
config.addinivalue_line("markers",
| {"golden_diff": "diff --git a/numpy/conftest.py b/numpy/conftest.py\n--- a/numpy/conftest.py\n+++ b/numpy/conftest.py\n@@ -3,6 +3,7 @@\n \"\"\"\n import os\n \n+import hypothesis\n import pytest\n import numpy\n \n@@ -12,6 +13,12 @@\n _old_fpu_mode = None\n _collect_results = {}\n \n+# See https://hypothesis.readthedocs.io/en/latest/settings.html\n+hypothesis.settings.register_profile(\n+ name=\"numpy-profile\", deadline=None, print_blob=True,\n+)\n+hypothesis.settings.load_profile(\"numpy-profile\")\n+\n \n def pytest_configure(config):\n config.addinivalue_line(\"markers\",\n", "issue": "TST: Add the first test using hypothesis\nThis pull request adds the first test that uses hypothesis and hence brings in hypothesis as an additional test dependency.\r\n\r\n@mattip Could you take a look at this please?\r\n\n", "before_files": [{"content": "\"\"\"\nPytest configuration and fixtures for the Numpy test suite.\n\"\"\"\nimport os\n\nimport pytest\nimport numpy\n\nfrom numpy.core._multiarray_tests import get_fpu_mode\n\n\n_old_fpu_mode = None\n_collect_results = {}\n\n\ndef pytest_configure(config):\n config.addinivalue_line(\"markers\",\n \"valgrind_error: Tests that are known to error under valgrind.\")\n config.addinivalue_line(\"markers\",\n \"leaks_references: Tests that are known to leak references.\")\n config.addinivalue_line(\"markers\",\n \"slow: Tests that are very slow.\")\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--available-memory\", action=\"store\", default=None,\n help=(\"Set amount of memory available for running the \"\n \"test suite. This can result to tests requiring \"\n \"especially large amounts of memory to be skipped. \"\n \"Equivalent to setting environment variable \"\n \"NPY_AVAILABLE_MEM. Default: determined\"\n \"automatically.\"))\n\n\ndef pytest_sessionstart(session):\n available_mem = session.config.getoption('available_memory')\n if available_mem is not None:\n os.environ['NPY_AVAILABLE_MEM'] = available_mem\n\n\n#FIXME when yield tests are gone.\[email protected]()\ndef pytest_itemcollected(item):\n \"\"\"\n Check FPU precision mode was not changed during test collection.\n\n The clumsy way we do it here is mainly necessary because numpy\n still uses yield tests, which can execute code at test collection\n time.\n \"\"\"\n global _old_fpu_mode\n\n mode = get_fpu_mode()\n\n if _old_fpu_mode is None:\n _old_fpu_mode = mode\n elif mode != _old_fpu_mode:\n _collect_results[item] = (_old_fpu_mode, mode)\n _old_fpu_mode = mode\n\n\[email protected](scope=\"function\", autouse=True)\ndef check_fpu_mode(request):\n \"\"\"\n Check FPU precision mode was not changed during the test.\n \"\"\"\n old_mode = get_fpu_mode()\n yield\n new_mode = get_fpu_mode()\n\n if old_mode != new_mode:\n raise AssertionError(\"FPU precision mode changed from {0:#x} to {1:#x}\"\n \" during the test\".format(old_mode, new_mode))\n\n collect_result = _collect_results.get(request.node)\n if collect_result is not None:\n old_mode, new_mode = collect_result\n raise AssertionError(\"FPU precision mode changed from {0:#x} to {1:#x}\"\n \" when collecting the test\".format(old_mode,\n new_mode))\n\n\[email protected](autouse=True)\ndef add_np(doctest_namespace):\n doctest_namespace['np'] = numpy\n", "path": "numpy/conftest.py"}]} | 1,338 | 153 |
gh_patches_debug_36207 | rasdani/github-patches | git_diff | huggingface__transformers-5972 | You will be provided with a partial code base and an issue statement explaining a problem to resolve.
<issue>
convert_roberta: AttributeError when converting CamemBERT model.pt to pytorch_model.bin
Hi,
I trained a CamemBERT model with the fairseq library which gave me the following files:
- dict.txt: vocabulary coming from the sentencepiece model
- sentencepiece.bpe.model
- model.pt
Now I am trying to convert the model.pt into pytorch_model.bin and config.json as mentioned here ([fairseq/issues#1514](https://github.com/pytorch/fairseq/issues/1514)) and here ([transformers/issue#1850](https://github.com/huggingface/transformers/issues/1850)), by using the conversion script of the transformers library ([transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py)). The goal is to use those files with fast-bert.
However, using this command line:
```shell
python convert_roberta_original_pytorch_checkpoint_to_pytorch.py --roberta_checkpoint_path ./ --pytorch_dump_folder_path ./ --classification_head
```
I get the following error:
```python
AttributeError Traceback (most recent call last)
<ipython-input-27-ea791887ff26> in <module>
----> 1 convert_roberta_original_pytorch_checkpoint_to_pytorch.convert_roberta_checkpoint_to_pytorch(CAMEMBERT_PATH, CAMEMBERT_PATH, True)
~/anaconda3/envs/NLP/lib/python3.7/site-packages/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py in convert_roberta_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head)
48 roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
49 roberta.eval() # disable dropout
---> 50 roberta_sent_encoder = roberta.model.decoder.sentence_encoder
51 config = RobertaConfig(
52 vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
~/anaconda3/envs/NLP/lib/python3.7/site-packages/torch/nn/modules/module.py in __getattr__(self, name)
592 return modules[name]
593 raise AttributeError("'{}' object has no attribute '{}'".format(
--> 594 type(self).__name__, name))
595
596 def __setattr__(self, name, value):
AttributeError: 'RobertaModel' object has no attribute 'decoder'
```
And indeed, when I check, the fairseq/PyTorch RobertaModel has no decoder attribute.
Am I doing this wrong? I see no other conversion script that fits my CamemBERT model, so I guess the RoBERTa one is the right one.
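
One quick way to see which attribute a given fairseq version actually exposes is to probe the loaded model directly (a sketch using the files above; depending on your fairseq version you may also need a `bpe=...` argument to load the sentencepiece model):

```python
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel

roberta = FairseqRobertaModel.from_pretrained("./", checkpoint_file="model.pt")
# Newer fairseq releases renamed the inner module, so the sentence encoder may
# live under `model.encoder` instead of `model.decoder`.
inner = getattr(roberta.model, "decoder", None) or getattr(roberta.model, "encoder", None)
print(type(inner), hasattr(inner, "sentence_encoder"))
```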
</issue>
<code>
[start of src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py]
1 # coding=utf-8
2 # Copyright 2018 The HuggingFace Inc. team.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """Convert RoBERTa checkpoint."""
16
17
18 import argparse
19 import logging
20 import pathlib
21
22 import fairseq
23 import torch
24 from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
25 from fairseq.modules import TransformerSentenceEncoderLayer
26 from packaging import version
27
28 from transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput
29 from transformers.modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification
30
31
32 if version.parse(fairseq.__version__) < version.parse("0.9.0"):
33 raise Exception("requires fairseq >= 0.9.0")
34
35
36 logging.basicConfig(level=logging.INFO)
37 logger = logging.getLogger(__name__)
38
39 SAMPLE_TEXT = "Hello world! cécé herlolip"
40
41
42 def convert_roberta_checkpoint_to_pytorch(
43 roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
44 ):
45 """
46 Copy/paste/tweak roberta's weights to our BERT structure.
47 """
48 roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
49 roberta.eval() # disable dropout
50 roberta_sent_encoder = roberta.model.decoder.sentence_encoder
51 config = RobertaConfig(
52 vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
53 hidden_size=roberta.args.encoder_embed_dim,
54 num_hidden_layers=roberta.args.encoder_layers,
55 num_attention_heads=roberta.args.encoder_attention_heads,
56 intermediate_size=roberta.args.encoder_ffn_embed_dim,
57 max_position_embeddings=514,
58 type_vocab_size=1,
59 layer_norm_eps=1e-5, # PyTorch default used in fairseq
60 )
61 if classification_head:
62 config.num_labels = roberta.args.num_classes
63 print("Our BERT config:", config)
64
65 model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)
66 model.eval()
67
68 # Now let's copy all the weights.
69 # Embeddings
70 model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
71 model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
72 model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
73 model.roberta.embeddings.token_type_embeddings.weight
74 ) # just zero them out b/c RoBERTa doesn't use them.
75 model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
76 model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias
77
78 for i in range(config.num_hidden_layers):
79 # Encoder: start of layer
80 layer: BertLayer = model.roberta.encoder.layer[i]
81 roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
82
83 # self attention
84 self_attn: BertSelfAttention = layer.attention.self
85 assert (
86 roberta_layer.self_attn.k_proj.weight.data.shape
87 == roberta_layer.self_attn.q_proj.weight.data.shape
88 == roberta_layer.self_attn.v_proj.weight.data.shape
89 == torch.Size((config.hidden_size, config.hidden_size))
90 )
91
92 self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
93 self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
94 self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
95 self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
96 self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
97 self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias
98
99 # self-attention output
100 self_output: BertSelfOutput = layer.attention.output
101 assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
102 self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
103 self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
104 self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
105 self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias
106
107 # intermediate
108 intermediate: BertIntermediate = layer.intermediate
109 assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
110 intermediate.dense.weight = roberta_layer.fc1.weight
111 intermediate.dense.bias = roberta_layer.fc1.bias
112
113 # output
114 bert_output: BertOutput = layer.output
115 assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
116 bert_output.dense.weight = roberta_layer.fc2.weight
117 bert_output.dense.bias = roberta_layer.fc2.bias
118 bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
119 bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
120 # end of layer
121
122 if classification_head:
123 model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
124 model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
125 model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
126 model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
127 else:
128 # LM Head
129 model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight
130 model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias
131 model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight
132 model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias
133 model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight
134 model.lm_head.decoder.bias = roberta.model.decoder.lm_head.bias
135
136 # Let's check that we get the same results.
137 input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
138
139 our_output = model(input_ids)[0]
140 if classification_head:
141 their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
142 else:
143 their_output = roberta.model(input_ids)[0]
144 print(our_output.shape, their_output.shape)
145 max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
146 print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
147 success = torch.allclose(our_output, their_output, atol=1e-3)
148 print("Do both models output the same tensors?", "🔥" if success else "💩")
149 if not success:
150 raise Exception("Something went wRoNg")
151
152 pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
153 print(f"Saving model to {pytorch_dump_folder_path}")
154 model.save_pretrained(pytorch_dump_folder_path)
155
156
157 if __name__ == "__main__":
158 parser = argparse.ArgumentParser()
159 # Required parameters
160 parser.add_argument(
161 "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
162 )
163 parser.add_argument(
164 "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
165 )
166 parser.add_argument(
167 "--classification_head", action="store_true", help="Whether to convert a final classification head."
168 )
169 args = parser.parse_args()
170 convert_roberta_checkpoint_to_pytorch(
171 args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
172 )
173
[end of src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py]
</code>
I need you to solve this issue by generating a single patch file that I can apply directly to this repository using git apply. Please respond with a single patch file in the following format.
<patch>
diff --git a/file.py b/file.py
--- a/file.py
+++ b/file.py
@@ -1,27 +1,35 @@
def euclidean(a, b):
- while b:
- a, b = b, a % b
- return a
+ if b == 0:
+ return a
+ return euclidean(b, a % b)
def bresenham(x0, y0, x1, y1):
points = []
dx = abs(x1 - x0)
dy = abs(y1 - y0)
- sx = 1 if x0 < x1 else -1
- sy = 1 if y0 < y1 else -1
- err = dx - dy
+ x, y = x0, y0
+ sx = -1 if x0 > x1 else 1
+ sy = -1 if y0 > y1 else 1
- while True:
- points.append((x0, y0))
- if x0 == x1 and y0 == y1:
- break
- e2 = 2 * err
- if e2 > -dy:
- err -= dy
- x0 += sx
- if e2 < dx:
- err += dx
- y0 += sy
+ if dx > dy:
+ err = dx / 2.0
+ while x != x1:
+ points.append((x, y))
+ err -= dy
+ if err < 0:
+ y += sy
+ err += dx
+ x += sx
+ else:
+ err = dy / 2.0
+ while y != y1:
+ points.append((x, y))
+ err -= dx
+ if err < 0:
+ x += sx
+ err += dy
+ y += sy
+
+ points.append((x, y))
return points
</patch> | diff --git a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
--- a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
+++ b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py
@@ -47,7 +47,7 @@
"""
roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
roberta.eval() # disable dropout
- roberta_sent_encoder = roberta.model.decoder.sentence_encoder
+ roberta_sent_encoder = roberta.model.encoder.sentence_encoder
config = RobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
hidden_size=roberta.args.encoder_embed_dim,
@@ -59,7 +59,7 @@
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
if classification_head:
- config.num_labels = roberta.args.num_classes
+ config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our BERT config:", config)
model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)
@@ -126,12 +126,12 @@
model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
- model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight
- model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias
- model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight
- model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias
- model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight
- model.lm_head.decoder.bias = roberta.model.decoder.lm_head.bias
+ model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
+ model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
+ model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
+ model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
+ model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
+ model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
| {"golden_diff": "diff --git a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py\n--- a/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py\n+++ b/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py\n@@ -47,7 +47,7 @@\n \"\"\"\n roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)\n roberta.eval() # disable dropout\n- roberta_sent_encoder = roberta.model.decoder.sentence_encoder\n+ roberta_sent_encoder = roberta.model.encoder.sentence_encoder\n config = RobertaConfig(\n vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,\n hidden_size=roberta.args.encoder_embed_dim,\n@@ -59,7 +59,7 @@\n layer_norm_eps=1e-5, # PyTorch default used in fairseq\n )\n if classification_head:\n- config.num_labels = roberta.args.num_classes\n+ config.num_labels = roberta.model.classification_heads[\"mnli\"].out_proj.weight.shape[0]\n print(\"Our BERT config:\", config)\n \n model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)\n@@ -126,12 +126,12 @@\n model.classifier.out_proj.bias = roberta.model.classification_heads[\"mnli\"].out_proj.bias\n else:\n # LM Head\n- model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight\n- model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias\n- model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight\n- model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias\n- model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight\n- model.lm_head.decoder.bias = roberta.model.decoder.lm_head.bias\n+ model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight\n+ model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias\n+ model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight\n+ model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias\n+ model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight\n+ model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias\n \n # Let's check that we get the same results.\n input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1\n", "issue": "convert_roberta: AttributeError when converting CamemBERT model.pt to pytorch_model.bin\nHi,\r\n\r\nI trained a CamemBERT model with the fairseq library which gave me the following files:\r\n- dict.txt: vocabulary coming from the sentencepiece model\r\n- sentencepiece.bpe.model\r\n- model.pt\r\n\r\nNow I am trying to convert the model.pt into pytorch_model.bin and config.json as mentionned here ([fairseq/issues#1514](https://github.com/pytorch/fairseq/issues/1514)) and here ([transformers/issue#1850](https://github.com/huggingface/transformers/issues/1850)), by using the conversion script of the transformers library ([transfomers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/blob/master/src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py)). 
The goal is to use those files with fast-bert.\r\n\r\nHowever, using this command line:\r\n```shell\r\npython convert_roberta_original_pytorch_checkpoint_to_pytorch.py --roberta_checkpoint_path ./ --pytorch_dump_folder_path ./ --classification_head\r\n```\r\n\r\nI get the following error:\r\n\r\n```python\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-27-ea791887ff26> in <module>\r\n----> 1 convert_roberta_original_pytorch_checkpoint_to_pytorch.convert_roberta_checkpoint_to_pytorch(CAMEMBERT_PATH, CAMEMBERT_PATH, True)\r\n\r\n~/anaconda3/envs/NLP/lib/python3.7/site-packages/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py in convert_roberta_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head)\r\n 48 roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)\r\n 49 roberta.eval() # disable dropout\r\n---> 50 roberta_sent_encoder = roberta.model.decoder.sentence_encoder\r\n 51 config = RobertaConfig(\r\n 52 vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,\r\n\r\n~/anaconda3/envs/NLP/lib/python3.7/site-packages/torch/nn/modules/module.py in __getattr__(self, name)\r\n 592 return modules[name]\r\n 593 raise AttributeError(\"'{}' object has no attribute '{}'\".format(\r\n--> 594 type(self).__name__, name))\r\n 595 \r\n 596 def __setattr__(self, name, value):\r\n\r\nAttributeError: 'RobertaModel' object has no attribute 'decoder'\r\n```\r\n\r\nAnd indeed when I check the fairseq/pytorch RobertaModel has no decoder attribute. \r\n\r\nAm I doing this wrong ? I see no other conversion script to fit my CamemBERT model so I guess the RoBERTa one is the good one.\r\n\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2018 The HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Convert RoBERTa checkpoint.\"\"\"\n\n\nimport argparse\nimport logging\nimport pathlib\n\nimport fairseq\nimport torch\nfrom fairseq.models.roberta import RobertaModel as FairseqRobertaModel\nfrom fairseq.modules import TransformerSentenceEncoderLayer\nfrom packaging import version\n\nfrom transformers.modeling_bert import BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput\nfrom transformers.modeling_roberta import RobertaConfig, RobertaForMaskedLM, RobertaForSequenceClassification\n\n\nif version.parse(fairseq.__version__) < version.parse(\"0.9.0\"):\n raise Exception(\"requires fairseq >= 0.9.0\")\n\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nSAMPLE_TEXT = \"Hello world! 
c\u00e9c\u00e9 herlolip\"\n\n\ndef convert_roberta_checkpoint_to_pytorch(\n roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool\n):\n \"\"\"\n Copy/paste/tweak roberta's weights to our BERT structure.\n \"\"\"\n roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)\n roberta.eval() # disable dropout\n roberta_sent_encoder = roberta.model.decoder.sentence_encoder\n config = RobertaConfig(\n vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,\n hidden_size=roberta.args.encoder_embed_dim,\n num_hidden_layers=roberta.args.encoder_layers,\n num_attention_heads=roberta.args.encoder_attention_heads,\n intermediate_size=roberta.args.encoder_ffn_embed_dim,\n max_position_embeddings=514,\n type_vocab_size=1,\n layer_norm_eps=1e-5, # PyTorch default used in fairseq\n )\n if classification_head:\n config.num_labels = roberta.args.num_classes\n print(\"Our BERT config:\", config)\n\n model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)\n model.eval()\n\n # Now let's copy all the weights.\n # Embeddings\n model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight\n model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight\n model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(\n model.roberta.embeddings.token_type_embeddings.weight\n ) # just zero them out b/c RoBERTa doesn't use them.\n model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight\n model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias\n\n for i in range(config.num_hidden_layers):\n # Encoder: start of layer\n layer: BertLayer = model.roberta.encoder.layer[i]\n roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]\n\n # self attention\n self_attn: BertSelfAttention = layer.attention.self\n assert (\n roberta_layer.self_attn.k_proj.weight.data.shape\n == roberta_layer.self_attn.q_proj.weight.data.shape\n == roberta_layer.self_attn.v_proj.weight.data.shape\n == torch.Size((config.hidden_size, config.hidden_size))\n )\n\n self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight\n self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias\n self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight\n self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias\n self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight\n self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias\n\n # self-attention output\n self_output: BertSelfOutput = layer.attention.output\n assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape\n self_output.dense.weight = roberta_layer.self_attn.out_proj.weight\n self_output.dense.bias = roberta_layer.self_attn.out_proj.bias\n self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight\n self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias\n\n # intermediate\n intermediate: BertIntermediate = layer.intermediate\n assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape\n intermediate.dense.weight = roberta_layer.fc1.weight\n intermediate.dense.bias = roberta_layer.fc1.bias\n\n # output\n bert_output: BertOutput = layer.output\n assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape\n bert_output.dense.weight = roberta_layer.fc2.weight\n bert_output.dense.bias = roberta_layer.fc2.bias\n 
bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight\n bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias\n # end of layer\n\n if classification_head:\n model.classifier.dense.weight = roberta.model.classification_heads[\"mnli\"].dense.weight\n model.classifier.dense.bias = roberta.model.classification_heads[\"mnli\"].dense.bias\n model.classifier.out_proj.weight = roberta.model.classification_heads[\"mnli\"].out_proj.weight\n model.classifier.out_proj.bias = roberta.model.classification_heads[\"mnli\"].out_proj.bias\n else:\n # LM Head\n model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight\n model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias\n model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight\n model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias\n model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight\n model.lm_head.decoder.bias = roberta.model.decoder.lm_head.bias\n\n # Let's check that we get the same results.\n input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1\n\n our_output = model(input_ids)[0]\n if classification_head:\n their_output = roberta.model.classification_heads[\"mnli\"](roberta.extract_features(input_ids))\n else:\n their_output = roberta.model(input_ids)[0]\n print(our_output.shape, their_output.shape)\n max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()\n print(f\"max_absolute_diff = {max_absolute_diff}\") # ~ 1e-7\n success = torch.allclose(our_output, their_output, atol=1e-3)\n print(\"Do both models output the same tensors?\", \"\ud83d\udd25\" if success else \"\ud83d\udca9\")\n if not success:\n raise Exception(\"Something went wRoNg\")\n\n pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)\n print(f\"Saving model to {pytorch_dump_folder_path}\")\n model.save_pretrained(pytorch_dump_folder_path)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n # Required parameters\n parser.add_argument(\n \"--roberta_checkpoint_path\", default=None, type=str, required=True, help=\"Path the official PyTorch dump.\"\n )\n parser.add_argument(\n \"--pytorch_dump_folder_path\", default=None, type=str, required=True, help=\"Path to the output PyTorch model.\"\n )\n parser.add_argument(\n \"--classification_head\", action=\"store_true\", help=\"Whether to convert a final classification head.\"\n )\n args = parser.parse_args()\n convert_roberta_checkpoint_to_pytorch(\n args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head\n )\n", "path": "src/transformers/convert_roberta_original_pytorch_checkpoint_to_pytorch.py"}]} | 3,420 | 609 |