def graph_display(self, gprint, node, g, cl):
"""Display a node in cytoscape graph."""
if gprint.function_in_node and self.on_edge:
lbl = gprint.label(node, "")
gprint.cynode(id=node, label=lbl, parent=g, classes=cl)
gprint.process_edges(
[(node, (self.label, "fn-edge"), node.inputs[1])]
)
else:
gprint.process_node_generic(node, g, cl)
def cosmetic_transformer(g):
"""Transform a graph so that it looks nicer.
The resulting graph is not a valid one to run, because it may contain nodes
with fake functions that only serve a cosmetic purpose.
"""
spec = (
_opt_distributed_constant,
_opt_fancy_make_tuple,
_opt_fancy_getitem,
_opt_fancy_resolve,
_opt_fancy_record_getitem,
_opt_fancy_array_map,
_opt_fancy_distribute,
_opt_fancy_transpose,
_opt_fancy_sum,
_opt_fancy_unsafe_static_cast,
_opt_fancy_scalar_to_array,
_opt_fancy_array_to_scalar,
_opt_fancy_hastag,
_opt_fancy_casttag,
_opt_fancy_tagged,
# careful=True
)
optim = LocalPassOptimizer(*spec)
optim(g)
return g
def ensure(cls, prim_or_group):
"""Make sure given object is a primitive or a group of primitives.
Convert a primitive to a PrimGroup if necessary.
:return a valid PrimGroup object
"""
if isinstance(prim_or_group, PrimGroup):
return prim_or_group
assert isinstance(prim_or_group, Primitive)
return cls(None, [prim_or_group])
def tensor_pytorch_aliasable(v, vseq, path):
"""Aliasing policy under which all PyTorch tensors are aliasable.
Tensors inside a list or ADT are not aliasable.
"""
if isinstance(v, torch.Tensor):
if any(isinstance(x, (list, ADT)) for x in vseq):
return "X"
else:
return True
return False
def _tensordot(g, a, b, *, axes):
"""Tensordot for myia.
This is a generalized version of dot where the
multiplication/contraction can happen on an arbitrary set of axes.
Analogous to np.tensordot.
"""
axes_a, axes_b = axes
axes_a = list(axes_a)
axes_b = list(axes_b)
as_ = a[1]
nda = len(as_)
bs = b[1]
ndb = len(bs)
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = tuple(notin + axes_a)
N1 = 1
for axis in notin:
N1 *= as_[axis]
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (N1, N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = tuple(axes_b + notin)
N1 = 1
for axis in notin:
N1 *= bs[axis]
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, N1)
oldb = [bs[axis] for axis in notin]
at = g.apply(P.reshape, g.apply(P.transpose, a[0], newaxes_a), newshape_a)
bt = g.apply(P.reshape, g.apply(P.transpose, b[0], newaxes_b), newshape_b)
res = g.apply(P.dot, at, bt)
res_shp = tuple(olda + oldb)
return (g.apply(P.reshape, res, res_shp), res_shp)
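# _tensordot above reduces a general contraction to transpose -> reshape -> 2-D dot
# -> reshape. A minimal NumPy sketch of the same trick, checked against np.tensordot
# (the helper name and values below are illustrative, not part of the Myia API):
import numpy as np

def tensordot_via_dot(a, b, axes_a, axes_b):
    # Move the contracted axes of `a` to the end and those of `b` to the front,
    # flatten each operand to 2-D, multiply, then restore the free axes.
    free_a = [k for k in range(a.ndim) if k not in axes_a]
    free_b = [k for k in range(b.ndim) if k not in axes_b]
    at = a.transpose(free_a + list(axes_a)).reshape(
        int(np.prod([a.shape[k] for k in free_a], dtype=int)),
        int(np.prod([a.shape[k] for k in axes_a], dtype=int)),
    )
    bt = b.transpose(list(axes_b) + free_b).reshape(
        int(np.prod([b.shape[k] for k in axes_b], dtype=int)),
        int(np.prod([b.shape[k] for k in free_b], dtype=int)),
    )
    out_shape = [a.shape[k] for k in free_a] + [b.shape[k] for k in free_b]
    return (at @ bt).reshape(out_shape)

a = np.random.rand(2, 3, 4)
b = np.random.rand(4, 3, 5)
reference = np.tensordot(a, b, axes=([1, 2], [1, 0]))
assert np.allclose(tensordot_via_dot(a, b, [1, 2], [1, 0]), reference)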
def _reduce_transpose(g, input_spec, output_spec, arg):
"""Perform a sum-reduction and transpose of a single argument.
It uses input_spec and output_spec (einsum-like strings) to infer
the dimensions to sum over (those that are not in output_spec) and
the ordering of the output.
"""
if input_spec == output_spec:
return arg
out_idx = set(output_spec)
reduce_axes = [i for i, c in enumerate(input_spec) if c not in out_idx]
shp = arg[1]
target_shape = [s if i not in reduce_axes else 1 for i, s in enumerate(shp)]
res = g.apply(P.array_reduce, P.scalar_add, arg[0], tuple(target_shape))
for i in reversed(reduce_axes):
del target_shape[i]
res = g.apply(P.reshape, res, tuple(target_shape))
mid_spec = [c for c in input_spec if c in out_idx]
transpose_pattern = tuple(mid_spec.index(c) for c in output_spec)
res = g.apply(P.transpose, res, transpose_pattern)
final_shape = tuple(target_shape[i] for i in transpose_pattern)
return (res, final_shape)
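# The same reduce-then-transpose plan as _reduce_transpose, sketched with plain
# NumPy so the role of input_spec/output_spec is easier to see (illustrative
# helper operating on arrays rather than Myia graph nodes):
import numpy as np

def reduce_transpose(arr, input_spec, output_spec):
    # Sum over the axes whose labels do not appear in output_spec,
    # then permute the remaining axes into output_spec order.
    out_idx = set(output_spec)
    reduce_axes = tuple(i for i, c in enumerate(input_spec) if c not in out_idx)
    res = arr.sum(axis=reduce_axes)
    mid_spec = [c for c in input_spec if c in out_idx]
    return res.transpose(tuple(mid_spec.index(c) for c in output_spec))

x = np.random.rand(2, 3, 4)                 # axes labelled "ijk"
expected = np.einsum("ijk->ki", x)          # sum over j, then swap i and k
assert np.allclose(reduce_transpose(x, "ijk", "ki"), expected)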
def _simple_einsum(g, spec, idx_rm, args):
"""Perform an einsum operation on one or two arguments.
This doesn't cover all cases that might arise from a full einsum
operation, but should be enough to cover the cases that tensordot
doesn't handle, except for diagonals.
"""
input_spec, output_spec = spec.split("->")
if len(input_spec) == len(set(input_spec)):
# Pure reduce/transpose
assert len(args) == 1
arg = args[0]
return _reduce_transpose(g, input_spec, output_spec, arg)
elif "," in input_spec:
input_list = input_spec.split(",")
assert len(input_list) == 2
a_spec = input_list[0]
b_spec = input_list[1]
a, b = args
av, as_ = a
bv, bs = b
idx_rm = list(idx_rm)
out_shape = []
out_spec = output_spec + "".join(idx_rm)
for c in out_spec:
p = a_spec.find(c)
if p != -1:
out_shape.append(as_[p])
else:
out_shape.append(bs[b_spec.find(c)])
out_shape = tuple(out_shape)
if a_spec != out_spec:
tt = tuple(a_spec.find(c) for c in out_spec if c in a_spec)
av = g.apply(P.transpose, av, tt)
ts = tuple(
out_shape[i] if c in a_spec else 1
for i, c in enumerate(out_spec)
)
av = g.apply(P.reshape, av, ts)
av = g.apply(P.distribute, av, out_shape)
if b_spec != out_spec:
tt = tuple(b_spec.find(c) for c in out_spec if c in b_spec)
bv = g.apply(P.transpose, bv, tt)
ts = tuple(
out_shape[i] if c in b_spec else 1
for i, c in enumerate(out_spec)
)
bv = g.apply(P.reshape, bv, ts)
bv = g.apply(P.distribute, bv, out_shape)
# elemwise
res = (g.apply(P.array_map, P.scalar_mul, av, bv), out_shape)
res_spec = out_spec
return _reduce_transpose(g, res_spec, output_spec, res)
else:
raise InferenceError(f"Can't support this pattern in einsum: {spec}")
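# For the two-operand branch above, the plan is: broadcast both operands to a
# common shape covering the output labels plus the contracted labels, multiply
# elementwise, then sum-reduce the contracted labels away. A NumPy sketch of that
# plan for "ij,jk->ik" (assumed example, independent of the Myia graph API):
import numpy as np

a = np.random.rand(2, 3)      # labels "ij"
b = np.random.rand(3, 4)      # labels "jk"
out_spec = "ikj"              # output labels "ik" plus the contracted label "j"
out_shape = (2, 4, 3)

# Line each operand up with out_spec, padding missing labels with size-1 axes.
av = np.broadcast_to(a.reshape(2, 1, 3), out_shape)                  # a[i, j] -> [i, k, j]
bv = np.broadcast_to(b.transpose(1, 0).reshape(1, 4, 3), out_shape)  # b[j, k] -> [i, k, j]

# Elementwise product, then sum-reduce the contracted label "j" away.
res = (av * bv).sum(axis=2)
assert np.allclose(res, np.einsum("ij,jk->ik", a, b))
assert np.allclose(res, a @ b)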
async def einsum(info, r_spec, *r_args):
"""Macro implementation for 'einsum'."""
_, *args = await info.abstracts()
spec = await info.build(r_spec)
shapes = tuple(a.xshape() for a in args)
try:
path = contract_expression(
spec, *shapes, optimize="dynamic-programming"
)
except ValueError as e:
raise InferenceError(*e.args)
g = info.graph
nodes = [(a.node, sh) for a, sh in zip(r_args, shapes)]
for contraction in path.contraction_list:
inds, idx_rm, einsum_str, remaining, blas_flag = contraction
tmp_nodes = [nodes.pop(x) for x in inds]
if blas_flag:
input_str, result_index = einsum_str.split("->")
input_left, input_right = input_str.split(",")
tensor_result = "".join(
s for s in input_left + input_right if s not in idx_rm
)
left_pos, right_pos = [], []
for s in idx_rm:
left_pos.append(input_left.find(s))
right_pos.append(input_right.find(s))
new_node = _tensordot(
g, *tmp_nodes, axes=(tuple(left_pos), tuple(right_pos))
)
if tensor_result != result_index:
transpose = tuple(map(tensor_result.index, result_index))
new_node = (
g.apply(P.transpose, new_node[0], tuple(transpose)),
tuple(new_node[1][i] for i in transpose),
)
else:
new_node = _simple_einsum(g, einsum_str, idx_rm, tmp_nodes)
nodes.append(new_node)
return nodes[0][0]
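# The einsum macro above delegates contraction planning to opt_einsum; each entry
# of contraction_list describes one pairwise step, and BLAS-friendly steps are
# routed to _tensordot. A standalone peek at what the planner returns (assumes the
# opt_einsum package is installed; the exact step details vary by version):
from opt_einsum import contract_expression

expr = contract_expression(
    "ij,jk,kl->il", (2, 3), (3, 4), (4, 5), optimize="dynamic-programming"
)
for inds, idx_rm, einsum_str, remaining, blas_flag in expr.contraction_list:
    # inds: which pending operands this step consumes; idx_rm: labels summed away;
    # einsum_str: the pairwise spec; blas_flag: truthy when a tensordot/GEMM applies.
    print(inds, sorted(idx_rm), einsum_str, blas_flag)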
def analyze_function(self, a, fn, argvals):
"""Analyze a function for the collect phase.
Arguments:
a: The abstract value for the function.
fn: The Function object, equivalent to a.get_unique().
argvals: The abstract arguments given to the function.
Returns:
ct: A Constant to use for this call.
ctx: The context for this call, or None
norm_ctx: The normalized context for this call, or None
"""
inf = self.engine.get_inferrer_for(fn)
argvals = argvals and inf.normalize_args_sync(argvals)
argvals, outval = self._find_unique_argvals(a, inf, argvals)
if isinstance(inf, TrackedInferrer):
fn = dc_replace(fn, tracking_id=None)
inf = self.engine.get_inferrer_for(fn)
if isinstance(fn, PrimitiveFunction):
tfn = TypedPrimitive(fn.prim, argvals, outval)
a = AbstractFunction(tfn)
return tfn, _const(fn.prim, a), None, None
assert isinstance(inf, GraphInferrer)
concretize_cache(inf.graph_cache)
ctx = inf.make_context(self.engine, argvals)
norm_ctx = _normalize_context(ctx)
new_ct = _const(_Placeholder(norm_ctx), None)
return None, new_ct, ctx, norm_ctx
def collect(self, root_context):
"""Collect all the available contexts.
Sets self.specializations to a dict from a normalized context (which we
must generate a graph for) to the original context (cached in the
inferrer in a possibly unnormalized form that contains Pendings).
Also sets self.replacements to a context->(node,key)->new_node dict.
When an inferrer's reroute function tells the inference engine that
some node is equivalent to another, and to use that other node to
resume inference, this is reflected in self.replacements.
"""
root = root_context.graph
todo = [
_TodoEntry(self.engine.ref(root.return_, root_context), None, None)
]
seen = set()
self.specializations[root_context] = root_context
while todo:
entry = todo.pop()
if entry in seen:
continue
seen.add(entry)
# Get the proper reference
ref = self.engine.get_actual_ref(entry.ref)
a = concretize_abstract(ref.get_resolved())
if entry.link is not None:
with About(ref.node.debug, "equiv"):
ct = _build(a)
if ct is not None:
ref = self.engine.ref(ct, ref.context)
new_node = ref.node
if ref.node.is_apply():
# Grab the argvals
irefs = [
self.engine.ref(inp, entry.ref.context)
for inp in ref.node.inputs
]
absfn = concretize_abstract(irefs[0].get_resolved())
argvals = [
concretize_abstract(iref.get_resolved())
for iref in irefs[1:]
]
prim = absfn.get_prim()
method = None
if prim is not None:
method = getattr(self, f"_special_{prim}", None)
if method is not None:
method(todo, ref, irefs, argvals)
else:
# Keep traversing the graph. Element 0 is special.
todo.append(_TodoEntry(irefs[0], tuple(argvals), (ref, 0)))
# Iterate through the rest of the inputs
for i, iref in enumerate(irefs[1:]):
todo.append(_TodoEntry(iref, None, (ref, i + 1)))
elif (
ref.node.is_constant_graph()
or ref.node.is_constant(MetaGraph)
or ref.node.is_constant(Primitive)
):
if ref.node.is_constant_graph():
ctabs = ref.node.value.abstract
else:
ctabs = ref.node.abstract
if ctabs is None or not isinstance(
ctabs, AbstractFunctionUnique
):
fn = a.get_unique()
with About(ref.node.debug, "equiv"):
try:
(
_,
new_node,
ctx,
norm_ctx,
) = self.finder.analyze_function(
a, fn, entry.argvals
)
if (
norm_ctx
and norm_ctx not in self.specializations
):
self.specializations[norm_ctx] = ctx
except Unspecializable as e:
aerr = AbstractError(e.problem, e.data)
new_node = _const(e.problem, aerr)
else:
if isinstance(fn, GraphFunction):
self.ctcache[ref.node] = norm_ctx
if fn.tracking_id:
self.ctcache[fn.tracking_id] = norm_ctx
if norm_ctx is not None:
retref = self.engine.ref(
norm_ctx.graph.return_, norm_ctx
)
todo.append(_TodoEntry(retref, None, None))
if new_node is not entry.ref.node:
if entry.link is None:
raise AssertionError("Cannot replace a return node.")
else:
ref, _ = entry.link
nctx = _normalize_context(ref.context)
self.replacements[nctx][entry.link] = new_node
def order_tasks(self):
"""Create an ordered list of "tasks" to perform into self.tasks.
Each task is a context/original_context pair. They are ordered such
that context.parent comes before context. That way, when copying
children graphs, their parent graphs will have also been copied, so we
can access their free variables.
"""
seen = set()
self.tasks = []
def _process_ctx(ctx, orig_ctx):
if ctx in seen or ctx in self.results:
return
self.infer_manager.add_graph(ctx.graph, root=True)
seen.add(ctx)
if ctx.parent != Context.empty():
orig_parent_ctx = self.specializations[ctx.parent]
_process_ctx(ctx.parent, orig_parent_ctx)
self.tasks.append([ctx, orig_ctx])
for ctx, orig_ctx in self.specializations.items():
_process_ctx(ctx, orig_ctx)
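# The parent-before-child ordering above is a depth-first walk up the parent chain
# guarded by a seen set. A minimal standalone sketch of the same idea, using a plain
# dict in place of Myia contexts (names and data here are purely illustrative):
def order_parent_first(items, parent_of):
    """Return items ordered so that each item's parent (if any) comes before it."""
    seen = set()
    ordered = []

    def visit(item):
        if item in seen:
            return
        seen.add(item)
        parent = parent_of(item)
        if parent is not None:
            visit(parent)          # emit the parent before the child
        ordered.append(item)

    for item in items:
        visit(item)
    return ordered

parents = {"leaf": "mid", "mid": "root", "root": None, "other": "root"}
assert order_parent_first(parents, parents.get) == ["root", "mid", "leaf", "other"]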
def create_graphs(self):
"""Create the (empty) graphs associated to the contexts."""
for entry in self.tasks:
ctx, orig_ctx = entry
newgraph = ctx.graph.make_new(relation=next(_count))
newgraph.set_flags(reference=False)
self.results[ctx] = newgraph
entry.append(newgraph)
def monomorphize(self):
"""Create the monomorphized graphs.
For each context in the computed order:
1. Rewire the original graph according to the reroutings of various
nodes suggested by the inferrer.
2. If monomorphizing the original, set node.abstract for all of its
nodes and reroute the free variables to the right monomorphized
parent. Get the next context and goto 1.
3. If not monomorphizing the original, clone it using _MonoRemapper.
_MonoRemapper will clone the graph as normal, except for its free
variables which will be connected to those of the right parent.
4. Set node.abstract for all of the cloned graph's nodes.
5. Undo the modifications on the original graph.
"""
m = self.infer_manager
cloners = {}
for ctx, orig_ctx, newgraph in self.tasks:
def fv_function(fv, ctx=ctx):
fv_ctx = ctx.filter(fv.graph)
assert fv_ctx in cloners
return cloners[fv_ctx][fv]
# Rewire the graph to clone
with m.transact() as tr:
for (ref, key), repl in self.replacements[ctx].items():
tr.set_edge(ref.node, key, repl)
# Clone the graph
cl = GraphCloner(
ctx.graph,
total=False,
clone_children=False,
clone_constants=True,
graph_repl={ctx.graph: newgraph},
remapper_class=_MonoRemapper.partial(
engine=self.engine, fv_function=fv_function
),
)
assert cl[ctx.graph] is newgraph
cloners[ctx] = cl
# Populate the abstract field
for old_node, new_node in cl.remapper.repl.items():
if isinstance(old_node, tuple):
old_node = old_node[1]
self.invmap[new_node] = self.engine.ref(old_node, orig_ctx)
# Undo changes to the original graph
tr.undo()
def fill_placeholders(self):
"""Replace all placeholder constants with monomorphized graphs.
The placeholders were created during the collect phase, since the
monomorphized graphs are unavailable at that stage. They contain the
context.
This procedure will not work on a managed graph because it changes
constants directly, therefore the manager is cleared entirely before
doing the procedure.
"""
for ctx, orig_ctx, g in self.tasks:
for node in dfs(g.return_, succ=succ_incoming):
if node.is_constant(_Placeholder):
node.value = self.results[node.value.context]
node.abstract = AbstractFunction(
GraphFunction(node.value, Context.empty())
)
def gen_constant_graph(self, g, ng, ct):
"""Constant graphs that get through here cannot be reachable."""
assert ct.value.abstract is None
with About(ct.debug, self.relation):
new = _const(DEAD, AbstractError(DEAD))
self.remap_node(ct, g, ct, ng, new)
def gen_fv_direct(self, g, ng, fv):
"""Remap the free variables we want to remap."""
new = self.fv_function(fv)
if new is not None:
self.remap_node((g, fv), g, fv, ng, new, link=False)
async def infer_shape(self, engine, a: AbstractArray):
"""Infer the return type of primitive `shape`."""
shp = await force_pending(a.xshape())
values = [
AbstractScalar({VALUE: entry, TYPE: xtype.UInt[64]}) for entry in shp
]
return AbstractTuple(values)
async def infer_max_pool2d(
self,
engine,
input: lib.AbstractArray,
kernel_size: lib.u64tup_typecheck,
stride: lib.u64tup_typecheck,
padding: lib.u64tup_typecheck,
dilation: lib.u64tup_typecheck,
ceil_mode: xtype.Bool,
):
"""Infer the return type of primitive `max_pool2d`."""
# TODO: _shape_type should not allow float to be converted to uint
# TODO: support ceil_mode == True
assert ceil_mode.xvalue() is False
h_in, w_in = input.xshape()[2:]
kernel_size = tuple(
self.require_constant(e, argnum=f'"1:kernel_size[{edx}]"')
for edx, e in enumerate(kernel_size.elements)
)
stride = tuple(
self.require_constant(e, argnum=f'"2:stride[{edx}]"')
for edx, e in enumerate(stride.elements)
)
padding = tuple(
self.require_constant(e, argnum=f'"3:padding[{edx}]"')
for edx, e in enumerate(padding.elements)
)
dilation = tuple(
self.require_constant(e, argnum=f'"4:dilation[{edx}]"')
for edx, e in enumerate(dilation.elements)
)
N = input.xshape()[0]
C_out = input.xshape()[1]
# Based on formulae in shape section of:
# https://pytorch.org/docs/stable/nn.html#torch.nn.MaxPool2d
H_out = (
(h_in + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1)
// stride[0]
) + 1
W_out = (
(w_in + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1)
// stride[1]
) + 1
out_shape = (N, C_out, int(H_out), int(W_out))
return type(input)(input.element, {SHAPE: out_shape, TYPE: input.xtype()})
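# The H_out/W_out arithmetic above is the standard MaxPool2d shape rule (floor
# mode) from the PyTorch docs. A small standalone helper applying the same
# formula (hypothetical name, for illustration only):
def max_pool2d_out_shape(n, c, h_in, w_in, kernel_size, stride, padding, dilation):
    """Output shape of a 2D max-pool with ceil_mode=False."""
    h_out = (h_in + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1) // stride[0] + 1
    w_out = (w_in + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1) // stride[1] + 1
    return (n, c, h_out, w_out)

# A 32x32 feature map, 3x3 window, stride 2, padding 1 -> 16x16.
assert max_pool2d_out_shape(8, 64, 32, 32, (3, 3), (2, 2), (1, 1), (1, 1)) == (8, 64, 16, 16)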
def insert_after(self, base_step, *more_steps):
"""Insert new steps after the given step.
This returns a new Pipeline.
"""
idx = self.steps.index(base_step) + 1
return self.with_steps(*self[:idx], *more_steps, *self[idx:])
def make_transformer(self, in_key, out_key):
"""Create a callable for specific input and output keys.
Arguments:
in_key: The name of the pipeline input to use for the
callable's argument.
out_key: The name of the pipeline output to return.
"""
def run(arg):
res = self(**{in_key: arg})
return res[out_key]
return run
def _call(self, fn, kwargs):
"""Execute one of the steps on the kwargs."""
step_name = _nameof(fn, str(self.steps.index(fn)))
with tracer(step_name, step=fn, **kwargs) as tr:
try:
if not isinstance(fn, FunctionType):
fn = fn.__call__
valid_args, rest = partition_keywords(fn, kwargs)
results = fn(**valid_args)
if not isinstance(results, dict) and len(valid_args) == 1:
(field_name,) = valid_args.keys()
results = {field_name: results}
kwargs = {**kwargs, **results}
tr.set_results(**kwargs)
except Exception as err:
tracer().emit_error(error=err)
raise
return kwargs
def with_resources(self, resources):
"""Return a Pipeline using the given resources."""
return type(self)(
*self,
resources=resources,
arguments=self.arguments,
name=self.name,
)
async def conv2d_grad_input(
info,
r_input_size,
r_weight,
r_grad_output,
r_stride,
r_padding,
r_dilation,
r_groups,
):
"""Return a new Apply calling conv_transpose2d with right arguments."""
_input_size = await r_input_size.get() # type: AbstractTuple
_weight = await r_weight.get() # type: AbstractArray
_grad_output = await r_grad_output.get() # type: AbstractArray
_stride = await r_stride.get() # type: AbstractTuple
_padding = await r_padding.get() # type: AbstractTuple
_dilation = await r_dilation.get() # type: AbstractTuple
input_size = _get_int_tuple(_input_size)
stride = _get_int_tuple(_stride)
padding = _get_int_tuple(_padding)
dilation = _get_int_tuple(_dilation)
weight_shape = _weight.xshape()
grad_output_shape = _grad_output.xshape()
kernel_size = (weight_shape[2], weight_shape[3])
# Compute grad input padding.
# For a 2D convolution, tensors should have 4 dimensions.
assert len(grad_output_shape) == 4
assert len(input_size) == 4
k = len(grad_output_shape) - 2
input_size = input_size[-k:]
min_sizes = []
for d in range(k):
min_sizes.append(
(grad_output_shape[d + 2] - 1) * stride[d]
- 2 * padding[d]
+ (kernel_size[d] - 1) * dilation[d]
+ 1
)
# Let's avoid checking minimum and maximum size here.
# Backends should check it when relevant.
grad_input_padding = tuple(input_size[d] - min_sizes[d] for d in range(k))
# End computing.
g = info.graph
return g.apply(
P.conv_transpose2d,
r_grad_output.node,
r_weight.node,
r_stride.node,
r_padding.node,
Constant(grad_input_padding),
r_groups.node,
r_dilation.node,
)
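# The grad_input_padding computed above is the gap between the requested input size
# and the smallest input that could have produced the given grad_output; it is passed
# as the output_padding of conv_transpose2d. A quick numeric sketch of that arithmetic
# (shapes below are chosen for illustration):
# Forward pass: input 1x3x10x10, 3x3 kernel, stride 2, padding 1, dilation 1
# -> output spatial size (10 + 2*1 - 1*(3-1) - 1) // 2 + 1 = 5.
input_size = (1, 3, 10, 10)
grad_output_shape = (1, 8, 5, 5)
kernel_size = (3, 3)
stride, padding, dilation = (2, 2), (1, 1), (1, 1)

k = len(grad_output_shape) - 2
spatial = input_size[-k:]
min_sizes = [
    (grad_output_shape[d + 2] - 1) * stride[d]
    - 2 * padding[d]
    + (kernel_size[d] - 1) * dilation[d]
    + 1
    for d in range(k)
]
grad_input_padding = tuple(spatial[d] - min_sizes[d] for d in range(k))
assert min_sizes == [9, 9] and grad_input_padding == (1, 1)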
def mixin(target):
"""Class decorator to add methods to the target class."""
def apply(cls):
methods = set(dir(cls))
methods.difference_update(set(dir(_Empty)))
for method_name in methods:
mthd = getattr(cls, method_name)
if isinstance(mthd, types.MethodType):
mthd = classmethod(mthd.__func__)
setattr(target, method_name, mthd)
return target
return apply
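# A usage sketch of the mixin decorator pattern above: methods defined on a helper
# class are copied onto the target class. The _Empty stand-in and the Point example
# below are assumptions made for illustration, not part of the original module:
import types

class _Empty:
    """Empty class used only to enumerate default class attributes."""

def mixin(target):
    """Class decorator to add methods to the target class."""
    def apply(cls):
        for method_name in set(dir(cls)) - set(dir(_Empty)):
            mthd = getattr(cls, method_name)
            if isinstance(mthd, types.MethodType):
                mthd = classmethod(mthd.__func__)
            setattr(target, method_name, mthd)
        return target
    return apply

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

@mixin(Point)
class _PointExtras:
    def norm2(self):
        return self.x ** 2 + self.y ** 2

assert Point(3, 4).norm2() == 25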
async def infer_env_setitem(
self, engine, env: xtype.EnvType, key: xtype.SymbolicKeyType, value
):
"""Infer the return type of primitive `env_setitem`."""
expected = key.xvalue().abstract
engine.abstract_merge(expected, value)
return AbstractScalar({VALUE: ANYTHING, TYPE: xtype.EnvType})
async def normalize_args(self, args):
"""Return normalized versions of the arguments.
By default, this returns args unchanged.
"""
return self.normalize_args_sync(args)
def make_signature(self, args):
"""Return a signature corresponding to the args.
Each signature corresponds to a graph.
"""
return args
def register(self, *types):
"""Register a function for the given type signature."""
def deco(fn):
atypes = tuple(abstract.type_to_abstract(t) for t in types)
self.entries.append((atypes, fn))
return fn
return deco
async def infer_array_getitem(
self,
engine,
a: lib.AbstractArray,
begin: lib.u64tup_typecheck,
end: lib.u64tup_typecheck,
strides: lib.i64tup_typecheck,
):
"""Infer the return type of primitive `array_getitem`."""
begin = tuple(
self.require_constant(e, argnum=f'"1:begin[{edx}]"')
for edx, e in enumerate(begin.elements)
)
end = tuple(
self.require_constant(e, argnum=f'"2:end[{edx}]"')
for edx, e in enumerate(end.elements)
)
strides = tuple(
self.require_constant(e, argnum=f'"3:strides[{edx}]"')
for edx, e in enumerate(strides.elements)
)
shp_before_stride = map(operator.sub, end, begin)
shp = tuple(map(_ceildiv, shp_before_stride, map(abs, strides)))
return type(a)(a.element, {SHAPE: shp, TYPE: a.xtype()})
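# The inferred shape above is ceil((end - begin) / |stride|) per dimension, which
# is exactly what Python slicing produces. A quick NumPy check of that formula
# (values chosen for illustration):
import math
import numpy as np

def getitem_shape(begin, end, strides):
    return tuple(
        math.ceil((e - b) / abs(s)) for b, e, s in zip(begin, end, strides)
    )

x = np.arange(5 * 7).reshape(5, 7)
begin, end, strides = (1, 0), (5, 6), (2, 3)
assert x[1:5:2, 0:6:3].shape == getitem_shape(begin, end, strides) == (2, 2)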
def bprop_array_getitem(data, begin, end, strides, out, dout):
"""Backpropagator for primitive `array_getitem`."""
return (
array_setitem(zeros_like(data), begin, end, strides, dout),
zeros_like(begin),
zeros_like(end),
zeros_like(strides),
)
async def infer_record_setitem(
self, engine, data: lib.AbstractClassBase, attr: xtype.String, value
):
"""Infer the return type of primitive `record_setitem`."""
attr_v = self.require_constant(attr, argnum=2)
if attr_v not in data.attributes:
raise MyiaAttributeError(f"Unknown field in {data}: {attr_v}")
model = data.user_defined_version()
expected = model.attributes[attr_v]
if not typecheck(expected, value):
raise MyiaTypeError(f"Expected field {attr_v} to have type {expected}")
return type(data)(
data.tag,
{**data.attributes, attr_v: value},
constructor=data.constructor,
)
def print_inference_error(error, file=sys.stderr):
"""Print an InferenceError's traceback."""
refs = [*error.traceback_refs.values()] + error.refs
for ref in refs:
if not skip_ref(ref):
print("=" * 80, file=file)
print_ref(ref, file=file)
print("~" * 80, file=file)
if error.pytb:
print(error.pytb, file=file)
else:
print(f"{type(error).__name__}: {error.message}", file=file) | def print_inference_error(error, file=sys.stderr):
"""Print an InferenceError's traceback."""
refs = [*error.traceback_refs.values()] + error.refs
for ref in refs:
if not skip_ref(ref):
print("=" * 80, file=file)
print_ref(ref, file=file)
print("~" * 80, file=file)
if error.pytb:
print(error.pytb, file=file)
else:
print(f"{type(error).__name__}: {error.message}", file=file) |
def print_myia_warning(warning, file=sys.stderr):
"""Print Myia Warning's location."""
msg = warning.args[0]
loc = warning.loc
print("=" * 80, file=file)
if loc is not None:
print(f"{loc.filename}:{loc.line}", file=file)
if loc is not None:
_show_location(loc, "", None, "MAGENTA", file=file)
print("~" * 80, file=file)
print(f"{warning.__class__.__name__}: {msg}", file=file) | def print_myia_warning(warning, file=sys.stderr):
"""Print Myia Warning's location."""
msg = warning.args[0]
loc = warning.loc
print("=" * 80, file=file)
if loc is not None:
print(f"{loc.filename}:{loc.line}", file=file)
if loc is not None:
_show_location(loc, "", None, "MAGENTA", file=file)
print("~" * 80, file=file)
print(f"{warning.__class__.__name__}: {msg}", file=file) |
def closure_convert(root):
"""Closure-convert all graphs starting from root.
The resulting graphs will have no free variables, but will instead get the
values of their free variables through additional arguments placed at the
beginning.
This is a destructive operation.
"""
mng = manage(root)
fvs = {gg: list(g_fvs) for gg, g_fvs in mng.free_variables_total.items()}
with mng.transact() as tr:
repl = defaultdict(dict)
for g in mng.graphs:
new_params = []
for node in fvs.get(g, []):
with About(node.debug, "fv"):
param = Parameter(g)
param.abstract = node.abstract
new_params.append(param)
tr.set_parameters(g, new_params + g.parameters)
repl[g][node] = param
closures = [(g, g.parent) for g in mng.graphs if g.parent]
for g, parent in closures:
# This loop creates an incomplete partial() call and sets it in the
# repl dictionary immediately.
sexp = (P.partial, g)
repl[parent][g] = sexp_to_node(sexp, parent)
for g, parent in closures:
# This loop completes the partials. It's important to do this using
# two loops, because a closure's free variables may contain a
# different partial, so we want all of them to be available.
closure_args = []
for fv in fvs[g]:
if isinstance(fv, Graph):
arg = repl[parent].get(fv, Constant(fv))
else:
arg = repl[parent].get(fv, fv)
closure_args.append(arg)
repl[parent][g].inputs[2:] = closure_args
for g in mng.graphs:
rg = repl[g]
for node in g.nodes:
if node.is_apply():
for i, inp in enumerate(node.inputs):
if inp in rg:
tr.set_edge(node, i, rg[inp])
elif inp.is_constant_graph() and inp.value in rg:
tr.set_edge(node, i, rg[inp.value])
return root
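# What closure conversion does, phrased at the ordinary Python level: every free
# variable becomes an explicit leading parameter, and the closure is rebuilt by
# pre-applying those values with partial (mirroring the P.partial calls above).
# A hand-converted sketch, not output of the pass:
from functools import partial

# Before conversion: `scale` is a free variable of the inner function.
def make_scaler(scale):
    def apply(x):
        return scale * x          # free variable: scale
    return apply

# After conversion: the free variable is an explicit first parameter.
def apply_converted(scale, x):
    return scale * x

def make_scaler_converted(scale):
    return partial(apply_converted, scale)

assert make_scaler(3)(4) == make_scaler_converted(3)(4) == 12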
async def infer_broadcast_shape(
self, engine, xs: u64tup_typecheck, ys: u64tup_typecheck
):
"""Infer the return type of primitive `broadcast_shape`."""
shp_x = tuple(x.xvalue() for x in xs.elements)
shp_y = tuple(y.xvalue() for y in ys.elements)
elems = []
try:
res = pyimpl_broadcast_shape(shp_x, shp_y)
except ValueError as e:
raise MyiaShapeError(e.args[0])
for n in res:
elems.append(AbstractScalar({VALUE: n, TYPE: xtype.UInt[64]}))
return AbstractTuple(elems)
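# pyimpl_broadcast_shape follows NumPy's broadcasting rule: align shapes on the
# right, and each aligned pair of dimensions must be equal or contain a 1. A small
# reference implementation of that rule, checked against NumPy (illustrative
# helper; it ignores Myia's ANYTHING placeholder for unknown dimensions):
import numpy as np

def broadcast_shape(shp_x, shp_y):
    """NumPy-style broadcast of two shapes; raises ValueError on mismatch."""
    out = []
    padded_x = (1,) * (len(shp_y) - len(shp_x)) + tuple(shp_x)
    padded_y = (1,) * (len(shp_x) - len(shp_y)) + tuple(shp_y)
    for a, b in zip(padded_x, padded_y):
        if a != b and a != 1 and b != 1:
            raise ValueError(f"shapes {shp_x} and {shp_y} are not broadcastable")
        out.append(max(a, b))
    return tuple(out)

assert broadcast_shape((8, 1, 6), (7, 6)) == (8, 7, 6)
assert broadcast_shape((8, 1, 6), (7, 6)) == np.broadcast_shapes((8, 1, 6), (7, 6))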
def _get_conv_output_shape(
image_shape, kernel_shape, border_mode, subsample, filter_dilation
):
"""Compute the output shape of a convolution operation.
Copied and simplified from Theano (2020/11/08):
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py
Parameters
----------
image_shape: tuple of int corresponding to the input
image shape. Its four (or five) elements must correspond respectively
to: batch size, number of input channels, height and width (and
possibly depth) of the image. None where undefined.
kernel_shape: tuple of int corresponding to the
kernel shape. For a normal convolution, its four (for 2D convolution)
or five (for 3D convolution) elements must correspond respectively to :
number of output channels, number of input channels, height and width
(and possibly depth) of the kernel.
For an unshared 2D convolution, its six channels must correspond to :
number of output channels, height and width of the output, number of
input channels, height and width of the kernel.
None where undefined.
border_mode: string, or tuple of int. If it is a string, it must be 'valid'
or 'full'. If it is a tuple, its two (or three) elements respectively
correspond to the padding on height and width (and possibly depth)
axis.
subsample: tuple of int. Its two or three elements
respectively correspond to the subsampling on height and width (and
possibly depth) axis.
filter_dilation: tuple of int. Its two or three
elements correspond respectively to the dilation on height and width axis.
Returns
-------
output_shape: tuple of int corresponding to the output image shape. Its
four elements must correspond respectively to: batch size, number of
output channels, height and width of the image.
"""
bsize, imshp = image_shape[0], image_shape[2:]
convdim = len(image_shape) - 2
nkern, kshp = kernel_shape[0], kernel_shape[-convdim:]
if isinstance(border_mode, tuple):
out_shp = tuple(
_get_conv_shape_1axis(
imshp[i],
kshp[i],
border_mode[i],
subsample[i],
filter_dilation[i],
)
for i in range(len(subsample))
)
else:
out_shp = tuple(
_get_conv_shape_1axis(
imshp[i], kshp[i], border_mode, subsample[i], filter_dilation[i]
)
for i in range(len(subsample))
)
return (bsize, nkern) + out_shp
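# Per axis, the rule applied above is out = (in + 2*pad - dilated_kernel) // stride + 1,
# with dilated_kernel = (kernel - 1) * dilation + 1; 'valid' means pad = 0 and 'full'
# means pad = dilated_kernel - 1. The single-axis helper _get_conv_shape_1axis is not
# shown above, so the version below is an assumed reconstruction for illustration:
def conv_shape_1axis(image_len, kernel_len, border_mode, stride, dilation):
    dil_kernel = (kernel_len - 1) * dilation + 1
    if border_mode == "valid":
        pad = 0
    elif border_mode == "full":
        pad = dil_kernel - 1
    else:
        pad = border_mode          # an explicit integer padding
    return (image_len + 2 * pad - dil_kernel) // stride + 1

# A 32-pixel axis with a 3-wide kernel, stride 1: 'valid' gives 30, 'full' gives 34,
# and an explicit padding of 1 keeps the length at 32.
assert conv_shape_1axis(32, 3, "valid", 1, 1) == 30
assert conv_shape_1axis(32, 3, "full", 1, 1) == 34
assert conv_shape_1axis(32, 3, 1, 1, 1) == 32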
def _conv2d(img, kern, mode="valid", dilation=(1, 1), groups=1):
"""Basic slow Python 2D or 3D convolution for DebugMode.
Copied and simplified from Theano (2020/11/08):
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py
"""
convdim = 2
assert mode in ("valid", "full")
out_shape = _get_conv_output_shape(
img.shape, kern.shape, mode, [1] * convdim, dilation
)
dil_kern_shp = kern.shape[:-convdim] + tuple(
(kern.shape[-convdim + i] - 1) * dilation[i] + 1 for i in range(convdim)
)
dilated_kern = np.zeros(dil_kern_shp, dtype=kern.dtype)
dilated_kern[
(slice(None),) * (dilated_kern.ndim - convdim)
+ tuple(slice(None, None, dilation[i]) for i in range(convdim))
] = kern
out = np.zeros(out_shape, dtype=img.dtype)
input_channel_offset = img.shape[1] // groups
output_channel_offset = kern.shape[0] // groups
val = _valfrommode(mode)
bval = _bvalfromboundary("fill")
with warnings.catch_warnings():
warnings.simplefilter("ignore", np.ComplexWarning)
for b in range(img.shape[0]):
for g in range(groups):
for n in range(output_channel_offset):
for im0 in range(input_channel_offset):
# some cast generates a warning here
out[
b, g * output_channel_offset + n, ...
] += _convolve2d(
img[b, g * input_channel_offset + im0, ...],
dilated_kern[
g * output_channel_offset + n, im0, ...
],
1,
val,
bval,
0,
)
return out
def conv2d_weight_grad(
input, weight_size, grad_output, stride, padding, dilation, groups
):
"""Computes gradient of conv2d with respect to the weight.
Adapted from Pytorch backend.
"""
in_channels = input.shape[1]
out_channels = grad_output.shape[1]
min_batch = input.shape[0]
grad_output = np.tile(
np.ascontiguousarray(grad_output), (1, in_channels // groups, 1, 1)
)
grad_output = np.ascontiguousarray(grad_output).reshape(
(
grad_output.shape[0] * grad_output.shape[1],
1,
grad_output.shape[2],
grad_output.shape[3],
)
)
input = np.ascontiguousarray(input).reshape(
(1, input.shape[0] * input.shape[1], input.shape[2], input.shape[3])
)
grad_weight = conv2d(
inp=input,
weight=grad_output,
dilation=stride,
padding=padding,
strides=dilation,
groups=in_channels * min_batch,
)
grad_weight = np.ascontiguousarray(grad_weight).reshape(
(
min_batch,
grad_weight.shape[1] // min_batch,
grad_weight.shape[2],
grad_weight.shape[3],
)
)
if groups > 1:
return np.sum(grad_weight, axis=0).reshape(
(
out_channels,
in_channels // groups,
grad_weight.shape[2],
grad_weight.shape[3],
)
)[:, :, : weight_size[2], :][:, :, :, : weight_size[3]]
else:
return (
np.sum(grad_weight, axis=0)
.reshape(
(
in_channels // groups,
out_channels,
grad_weight.shape[2],
grad_weight.shape[3],
)
)
.transpose(1, 0, 2, 3)[:, :, : weight_size[2], :][
:, :, :, : weight_size[3]
]
)
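# Sketch with assumed shapes (not part of the original module): the reshape used
# above folds the batch dimension into the channel dimension, so that a grouped
# convolution can treat every (batch, channel) pair independently.
import numpy as np
n, c, h, w = 2, 3, 5, 5
x = np.random.rand(n, c, h, w)
folded = np.ascontiguousarray(x).reshape(1, n * c, h, w)
assert folded.shape == (1, 6, 5, 5)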
Python | def conv_transpose2d(
data, weight, strides, padding, output_padding, groups, dilation
):
"""Implement conv2d_transpose using conv2d.
Adapted from Theano and Relay backend.
Theano reference (2020/11/08):
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py
"""
data_shape = data.shape
kern_shape = weight.shape
n, _, h_in, w_in = data_shape
filter_h, filter_w = kern_shape[2:]
c_out = kern_shape[1] * groups
h_out = (
(h_in - 1) * strides[0]
- 2 * padding[0]
+ dilation[0] * (filter_h - 1)
+ output_padding[0]
+ 1
)
w_out = (
(w_in - 1) * strides[1]
- 2 * padding[1]
+ dilation[1] * (filter_w - 1)
+ output_padding[1]
+ 1
)
kern = weight
topgrad = data
shape = (h_out, w_out)
imshp = (n, c_out, h_out, w_out)
convdim = 2
assert topgrad.ndim == kern.ndim == 2 + convdim
dil_kernshp = tuple(
(kern.shape[-convdim + i] - 1) * dilation[i] + 1 for i in range(convdim)
)
pad = tuple((m, m) for m in padding)
expected_topgrad_shape = _get_conv_output_shape(
imshp, kern.shape, padding, strides, dilation
)
if expected_topgrad_shape != tuple(topgrad.shape):
# If the expected topgrad is larger than the given topgrad, padding
# the trailing dimensions appears to be sufficient to produce the
# right conv_transpose2d output.
assert all(
expected >= given
for (expected, given) in zip(expected_topgrad_shape, topgrad.shape)
), (
"invalid input_shape for gradInputs: the given input_shape "
"would produce an output of shape {}, but the given topgrad "
"has shape {}".format(
tuple(expected_topgrad_shape), tuple(topgrad.shape)
)
)
tmp = np.zeros(expected_topgrad_shape, dtype=topgrad.dtype)
tmp[tuple(slice(None, val) for val in topgrad.shape)] = topgrad
topgrad = tmp
if any(strides[i] > 1 for i in range(convdim)):
new_shape = (topgrad.shape[0], topgrad.shape[1]) + tuple(
shape[i] + pad[i][0] + pad[i][1] - dil_kernshp[i] + 1
for i in range(convdim)
)
new_topgrad = np.zeros(new_shape, dtype=topgrad.dtype)
new_topgrad[
(slice(None), slice(None))
+ tuple(slice(None, None, strides[i]) for i in range(convdim))
] = topgrad
topgrad = new_topgrad
def correct_for_groups(mat):
mshp0 = mat.shape[0] // groups
mshp1 = mat.shape[-convdim - 1] * groups
mat = mat.reshape((groups, mshp0) + mat.shape[1:])
mat = mat.transpose((1, 0, 2) + tuple(range(3, 3 + convdim)))
mat = mat.reshape((mshp0, mshp1) + mat.shape[-convdim:])
return mat
kern = correct_for_groups(kern)
axes_order = (1, 0) + tuple(range(2, 2 + convdim))
kern = kern.transpose(axes_order)
img = _conv2d(topgrad, kern, mode="full", dilation=dilation, groups=groups)
if any(p != (0, 0) for p in pad):
img = img[
(slice(None), slice(None))
+ tuple(
slice(pad[i][0], img.shape[i + 2] - pad[i][1])
for i in range(convdim)
)
]
return img
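# Worked example with made-up values of the output-size formula used above; it
# matches the formula documented for torch.nn.ConvTranspose2d.
h_in, stride, pad, dil, out_pad, k = 5, 2, 1, 1, 1, 3
h_out = (h_in - 1) * stride - 2 * pad + dil * (k - 1) + out_pad + 1
assert h_out == 10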
Python | def argmax(x, dim):
"""Implementation of argmax primitive.
Adapted from Pytorch backend.
"""
dim = tuple(sorted(dim))
n = ()
for _s in range(len(x.shape)):
if _s not in dim:
n = n + (_s,)
n = n + dim
# x = x.permute(n)
x = np.transpose(x, n)
ns = x.shape[0 : -len(dim)] + (-1,)
r = np.argmax(x.reshape(ns), -1)
rl = list(r.shape)
for _sd in dim:
rl.insert(_sd, 1)
rf = tuple(rl)
return np.reshape(r, rf)
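# Sanity sketch (not part of the original module): for a single reduction axis,
# the result above should match NumPy's argmax with the reduced axis kept as a
# size-1 dimension.
import numpy as np
x = np.arange(24).reshape(2, 3, 4)
expected = np.expand_dims(np.argmax(x, axis=1), axis=1)
assert expected.shape == (2, 1, 4)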
Python | def _max_pool2d_out_shape(
imgshape, ws, stride, pad, ndim,
):
"""Return the shape of max_pool2d output.
Adapted from Theano (2020/11/08):
https://github.com/Theano/Theano/blob/master/theano/tensor/signal/pool.py
Parameters
----------
imgshape : tuple, list, or similar of integer or scalar Theano variable
The shape of a tensor of images. The last N elements are
interpreted as the number of rows, and the number of cols.
ws : list or tuple of N ints
Downsample factor over rows and columns.
ws indicates the pool region size.
stride : list or tuple of N ints
Stride size, which is the number of shifts over rows/cols/slices to get the
next pool region.
pad : tuple of N ints
For each downsampling dimension, this specifies the number of zeros to
add as padding on both sides. For 2D pooling with pad = (pad_h, pad_w), pad_h
specifies the size of the top and bottom margins and pad_w specifies the size
of the left and right margins.
ndim : int
The number of pooling dimensions N.
The default is 2.
Returns
-------
list
The shape of the output from this op, for input of given shape.
"""
assert ndim > 0
assert (
len(imgshape) >= ndim
), "imgshape must have at least {} dimensions".format(ndim)
# Compute output shape based on formula on Torch page (2020/11/16):
# https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d
h_in, w_in = imgshape[-ndim:]
h_out = int((h_in + 2 * pad[0] - (ws[0] - 1) - 1) // stride[0] + 1)
w_out = int((w_in + 2 * pad[1] - (ws[1] - 1) - 1) // stride[1] + 1)
rval = list(imgshape[:-ndim]) + [h_out, w_out]
return rval
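# Worked example with made-up values of the formula above: a 32x32 input with a
# 2x2 window, stride 2 and no padding gives a 16x16 output.
h_in, ws, stride, pad = 32, 2, 2, 0
h_out = (h_in + 2 * pad - (ws - 1) - 1) // stride + 1
assert h_out == 16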
Python | def _validate(self):
"""Check that all the argument names are valid."""
if isinstance(self.func, type):
f = getattr(self.func, "__init__", self.func)
else:
f = self.func
_, invalid = partition_keywords(f, self.keywords)
if invalid:
keys = ", ".join(f"'{k}'" for k in invalid.keys())
raise TypeError(f"{f} has no argument(s) named {keys}") | def _validate(self):
"""Check that all the argument names are valid."""
if isinstance(self.func, type):
f = getattr(self.func, "__init__", self.func)
else:
f = self.func
_, invalid = partition_keywords(f, self.keywords)
if invalid:
keys = ", ".join(f"'{k}'" for k in invalid.keys())
raise TypeError(f"{f} has no argument(s) named {keys}") |
Python | async def infer_make_kwarg(self, engine, key, value):
"""Infer the return type of primitive `make_kwarg`."""
k = key.xvalue()
assert isinstance(k, str)
return AbstractKeywordArgument(k, value)
Python | def untested():
"""Wrap legacy code that isn't covered anymore but we are not sure why."""
warnings.warn(
UserWarning(
"You triggered code that used to be essential but stopped being"
" covered for some reason. Please report your use case so that we"
" can validate the code again and add a test for it."
),
stacklevel=3,
)
yield
Python | async def infer_array_map(self, engine, fn: AbstractFunctionBase, *arrays):
"""Infer the return type of primitive `array_map`."""
if len(arrays) < 1:
raise MyiaTypeError("array_map requires at least one array")
for arr in arrays:
await engine.check_immediate(AbstractArray, arr)
subargs = [a.element for a in arrays]
result = await engine.execute(fn, *subargs)
shapes = [a.xshape() for a in arrays]
shape0, *rest = shapes
if any(len(s) != len(shape0) for s in rest): # pragma: no cover
# check_immediate above is checking this for us, although
# the error message is poor
raise MyiaShapeError("Expect same shapes for array_map")
rshape = []
for entries in zip(*shapes):
entries = set(entries)
entries.add(ANYTHING)
if len(entries) == 1:
rshape.append(ANYTHING)
elif len(entries) == 2:
entries.remove(ANYTHING)
(entry,) = entries
rshape.append(entry)
else:
raise MyiaShapeError("Expect same shapes for array_map")
for arr in arrays:
if arrays[0].xtype() != arr.xtype():
raise MyiaTypeError(
f"Expect array of type {arrays[0].xtype()} "
f"to have same type as array of type {arr.xtype()}"
)
return type(arrays[0])(
result, {SHAPE: tuple(rshape), TYPE: arrays[0].xtype()}
)
Python | def param(self, graph, model):
"""Create a new parameter for the graph based on the model node.
The abstract field of the model will be copied for the new
parameter.
"""
with About(model.debug, self.relation):
# param = graph.add_parameter()
param = Parameter(graph)
param.abstract = model.abstract
return param
Python | def make_groups(self):
"""Group the graphs according to their uses.
Returns {graph: entry}.
Each resulting entry contains the following fields:
* graph: The graph for this entry.
* eqv: A set of graphs that must be rewritten identically.
* calls: A {call_site: graphs} dict that maps a node to the
set of graphs that may be called there. That set may not
include the graph for this entry. Only one graph in the
eqv set will have a non-empty dictionary here.
"""
entries = {}
for g in self.graphs:
if g in entries:
continue
self._make_group(g, entries)
return entries
Python | def order_key(self, g):
"""Return a key to sort graphs.
Graphs with a lower key will be processed first.
Arguments:
g: The graph to order.
"""
raise NotImplementedError("Override in subclass") | def order_key(self, g):
"""Return a key to sort graphs.
Graphs with a lower key will be processed first.
Arguments:
g: The graph to order.
"""
raise NotImplementedError("Override in subclass") |
Python | def rewrite_call(self, node, entry):
"""Rewrite the given call site.
self.manager should be used to perform the rewriting, either using
a transaction or directly.
Arguments:
node: A call site to rewrite.
entry: An entry with the information needed to perform the rewrite.
Note that entry.graph is not necessarily callable from this
call site, but one or more of the graphs in entry.eqv are.
Returns:
True if any changes were made.
"""
raise NotImplementedError("Override in subclass") | def rewrite_call(self, node, entry):
"""Rewrite the given call site.
self.manager should be used to perform the rewriting, either using
a transaction or directly.
Arguments:
node: A call site to rewrite.
entry: An entry with the information needed to perform the rewrite.
Note that entry.graph is not necessarily callable from this
call site, but one or more of the graphs in entry.eqv are.
Returns:
True if any changes were made.
"""
raise NotImplementedError("Override in subclass") |
Python | def rewrite_graph(self, entry):
"""Rewrite the graph for this entry.
The call sites are rewritten before the graphs.
self.manager should be used to perform the rewriting, either using
a transaction or directly. The parameters should be changed using
the manager/transaction, not with `graph.add_parameter`.
Arguments:
entry: entry.graph is the graph to be rewritten.
Returns:
True if any changes were made.
"""
raise NotImplementedError("Override in subclass") | def rewrite_graph(self, entry):
"""Rewrite the graph for this entry.
The call sites are rewritten before the graphs.
self.manager should be used to perform the rewriting, either using
a transaction or directly. The parameters should be changed using
the manager/transaction, not with `graph.add_parameter`.
Arguments:
entry: entry.graph is the graph to be rewritten.
Returns:
True if any changes were made.
"""
raise NotImplementedError("Override in subclass") |
Python | def filter(self, entry, all_entries):
"""Keep the entry if graphs in eqv all miss common parameters."""
params_grouped = zip(*[g.parameters for g in entry.eqv])
entry.keep = [
any(self.manager.uses[p] for p in params)
for params in params_grouped
]
# No rewrite if all parameters are kept
return not all(entry.keep)
Python | def rewrite_call(self, call, entry):
"""Remove unused parameters from the call site."""
new_call = call.graph.apply(
call.inputs[0],
*[arg for arg, keep in zip(call.inputs[1:], entry.keep) if keep]
)
new_call.abstract = call.abstract
self.manager.replace(call, new_call)
return True
Python | def rewrite_graph(self, entry):
"""Remove unused parameters from the graph parameters."""
self.manager.set_parameters(
entry.graph,
[p for p, keep in zip(entry.graph.parameters, entry.keep) if keep],
)
return True
Python | def filter(self, entry, all_entries):
"""Only graphs that have free variables will be transformed.
In order for the lambda lifting to work properly when a function F
refers to a function G that cannot be lambda lifted but has free
variables (in other words, G is a free variable of F), G will have
to be moved inside F's scope.
We only do this if all uses of G are inside the scope of F. Otherwise
we will not lambda lift F.
"""
g = entry.graph
fvg = {
g2
for g2 in g.free_variables_total
if isinstance(g2, Graph) and g2 not in all_entries
}
all_fvs = reduce(
operator.or_,
[gg.free_variables_extended for gg in entry.eqv],
OrderedSet(),
)
if all_fvs and all(
all(user in g.scope for user in g2.graph_users) for g2 in fvg
):
entry.fvs = all_fvs
entry.scope = {*g.scope, *fvg}
return True
else:
return False
Python | def order_key(self, g):
"""Order graphs so that children are processed before parents.
Reverse the order so that children are processed before parents. This
is important when substituting the new parameters for the free
variables, because children that are lambda lifted must replace their
uses first (otherwise the original fvs would be replaced by their
parent's parameters, which is not what we want)
"""
if g.parent:
return self.order_key(g.parent) - 1
else:
return 0
Python | def rewrite_call(self, node, entry):
"""For each closure, we add arguments to each call of the closure.
The arguments that are added are the original free variables, or
DEAD if none of the graphs that can be called at that site have that
free variable.
"""
fvs = [
fv
if any(fv in gg.free_variables_extended for gg in entry.calls[node])
else make_dead(fv)
for fv in entry.fvs
]
new_node = node.graph.apply(*node.inputs, *fvs)
new_node.abstract = node.abstract
self.manager.replace(node, new_node)
return True
Python | def rewrite_graph(self, entry):
"""Rewrite the graphs.
New parameters are added for each free variable.
Then, we redirect all free variables within scope to the new
parameters, which means that they are not closures anymore.
"""
mng = self.manager
new_params = list(entry.graph.parameters)
with mng.transact() as tr:
# Redirect the fvs to the parameter (those in scope)
for fv in entry.fvs:
param = self.param(entry.graph, fv)
new_params.append(param)
if fv in entry.graph.free_variables_extended:
for node, idx in mng.uses[fv]:
if node.graph in entry.scope:
tr.set_edge(node, idx, param)
tr.set_parameters(entry.graph, new_params)
return True
Python | def require_same(fns, objs):
"""Check that all objects have the same properties.
Arguments:
fns: A collection of functions. Each function must return the same
result when applied to each object. For example, the functions
may be `[type, len]`.
objs: Objects that must be invariant with respect to the given
functions.
"""
o, *rest = objs
for fn in fns:
for obj in rest:
if fn(o) != fn(obj):
raise TypeError(
"Objects do not have the same properties:"
f" `{o}` and `{obj}` are not conformant."
)
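# Usage sketch, assuming `require_same` above is in scope: same type and
# length passes, a type mismatch raises TypeError.
require_same([type, len], [(1, 2), (3, 4)])
try:
    require_same([type, len], [(1, 2), [3, 4]])
except TypeError as exc:
    print(exc)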
Python | def to_dict(self, keys):
"""Convert to a real dict with given keys.
Each key will be associated with the abstract value in the output dict.
"""
return {key: self.value_type for key in keys}
Python | def keys(self):
"""Return an empty iterable.
Placeholder to make AbstractDict work correctly, as it is called
in AbstractDict.__eqkey__.
"""
return ()
Python | def xtype(self):
"""Return the type of this AbstractValue."""
t = self.values.get(TYPE, None)
if isinstance(t, Pending) and t.done():
t = t.result()
return t
Python | def user_defined_version(self):
"""Return the user-defined version of this type.
This uses the attribute types as defined by the user, rather than what
is generated by the inferrer or other methods.
"""
from .to_abstract import type_to_abstract
return type_to_abstract(self.tag)
Python | def broaden(self, v, recurse, **kwargs):
"""Make a value more generic.
By default, this amounts to a straight copy.
"""
return recurse(v, **kwargs)
Python | def listof(t):
"""Return the type of a list of t."""
rval = AbstractADT.new(Cons, {"head": t, "tail": None})
rval.attributes["tail"] = AbstractUnion.new([empty, rval])
return rval.intern()
Python | def u64tup_typecheck(engine, tup):
"""Verify that tup is a tuple of uint64."""
tup_t = engine.check(AbstractTuple, tup)
for elem_t in tup_t.elements:
engine.abstract_merge(xtype.UInt[64], elem_t.xtype())
return tup_t
Python | def u64pair_typecheck(engine, shp):
"""Verify that tup is a pair of uint64."""
tup_t = u64tup_typecheck(engine, shp)
if len(tup_t.elements) != 2:
raise MyiaTypeError(
f"Expected Tuple Length 2, not Tuple Length"
f"{len(tup_t.elements)}"
)
return tup_t
Python | def i64tup_typecheck(engine, tup):
"""Verify that tup is a tuple of int64."""
tup_t = engine.check(AbstractTuple, tup)
for elem_t in tup_t.elements:
engine.abstract_merge(xtype.Int[64], elem_t.xtype())
return tup_t
Python | def wrap_primitives(graph):
"""Helper function to wrap primitives.
This wraps all primitives used in non-call positions in a graph.
"""
mng = graph.manager
prim_graphs = {}
with mng.transact() as tr:
cts = {ct for cts in mng.constants.values() for ct in cts}
for ct in cts:
if ct.is_constant(Primitive):
for node, key in mng.uses[ct]:
if key != 0:
if (
key == 1
and node.inputs[0].is_constant()
and node.inputs[0].value
in (P.array_map, P.array_reduce)
):
continue
g = get_prim_graph(prim_graphs, ct.value, ct.abstract)
tr.set_edge(node, key, Constant(g))
return graph
Python | def return_handles(graph):
"""Change the Universe output to return all the new values of handles."""
mng = graph.manager
handle_nodes = []
handle_idx = []
for i, p in enumerate(graph.parameters):
if isinstance(p.abstract, AbstractHandle):
handle_nodes.append(p)
handle_idx.append(i)
if len(handle_nodes) != 0:
ct0 = Constant(0)
ct1 = Constant(1)
ct0.abstract = to_abstract(0)
ct1.abstract = to_abstract(1)
old_a = graph.output.abstract
with mng.transact() as tr:
if graph.output.is_apply(P.make_tuple):
universe_out = graph.output.inputs[1]
normal_out = graph.output.inputs[2]
else:
assert isinstance(graph.output.abstract, AbstractTuple)
assert len(graph.output.abstract.elements) == 2
universe_out = graph.apply(P.tuple_getitem, graph.output, ct0)
universe_out.abstract = graph.output.abstract.elements[0]
normal_out = graph.apply(P.tuple_getitem, graph.output, ct1)
normal_out.abstract = graph.output.abstract.elements[1]
vals = [
graph.apply(P.universe_getitem, universe_out, n)
for n in handle_nodes
]
types = [n.abstract.element for n in handle_nodes]
for v, a in zip(vals, types):
v.abstract = a
handles = graph.apply(P.make_tuple, *vals)
handles.abstract = AbstractTuple(types)
new_out_node = graph.apply(P.make_tuple, handles, normal_out)
tr.replace(graph.output, new_out_node)
graph.output.abstract = AbstractTuple(
[handles.abstract] + old_a.elements[1:]
)
return graph, handle_idx
Python | def split(self, graph):
"""Split a graph into portions."""
splits = []
for node in toposort(graph.return_):
if self._is_cut(node):
splits.append(node)
elif not (node.is_constant() or node.is_parameter()):
splits.append([node])
return splits
Python | def ref(self, node):
"""Get the stack reference for the value of a node.
This can actually cause a push if the node is a constant that
wasn't referred to before.
"""
if node not in self.slots and node.is_constant():
if node.is_constant_graph():
self.add_instr("push_graph", node.value)
else:
assert not isinstance(node.value, Primitive)
v = self.backend.to_backend_value(node.value, node.abstract)
self.add_instr("push", v)
self.push(node)
return self.slots[node] - self.height
Python | def dup(self, node):
"""Ensures that the value for node is at the top of the stack."""
assert node in self.slots
self.add_instr("dup", self.ref(node))
self.height += 1
return -1
Python | def run(self, graph):
"""Convert the graph into a list of instructions."""
self._reset()
splits = self.split(graph)
for p in reversed(graph.parameters):
self.push(p)
param_height = self.height
for split in splits:
if isinstance(split, list):
run, inputs, outputs = self.lin_convert(split)
# prime the arguments because self.ref() can invalidate
# previously returned references if a new one is not ready
for i in inputs:
self.ref(i)
args = [self.ref(i) for i in inputs]
self.add_instr("external", run, args)
for o in outputs:
self.push(o)
else:
assert isinstance(split, Apply)
fn = split.inputs[0]
if fn.is_constant(Primitive):
# prime the arguments because self.ref() can invalidate
# previously returned references if a new one is not ready
for i in split.inputs[1:]:
self.ref(i)
if fn.value == P.return_:
self.add_instr(
"return", self.ref(split.inputs[1]), self.height
)
# execution stops here
break
elif fn.value == P.partial:
self.add_instr(
"partial",
self.ref(split.inputs[1]),
*tuple(self.ref(inp) for inp in split.inputs[2:]),
)
elif fn.value == P.switch:
self.add_instr(
"switch",
self.ref(split.inputs[1]),
self.ref(split.inputs[2]),
self.ref(split.inputs[3]),
)
elif fn.value == P.make_tuple:
self.add_instr(
"tuple", *[self.ref(i) for i in split.inputs[1:]]
)
elif fn.value == P.bool_and:
self.add_instr(
"bool_and",
self.ref(split.inputs[1]),
self.ref(split.inputs[2]),
)
elif fn.value == P.tuple_getitem:
self.add_instr(
"tuple_getitem",
self.ref(split.inputs[1]),
self.ref(split.inputs[2]),
)
elif fn.value == P.tuple_setitem:
self.add_instr(
"tuple_setitem",
self.ref(split.inputs[1]),
self.ref(split.inputs[2]),
self.ref(split.inputs[3]),
)
elif fn.value == P.tagged:
self.add_instr(
"tagged",
self.ref(split.inputs[1]),
self.ref(split.inputs[2]),
)
elif fn.value == P.hastag:
self.add_instr(
"hastag",
self.ref(split.inputs[1]),
self.ref(split.inputs[2]),
)
elif fn.value == P.casttag:
self.add_instr(
"casttag",
self.ref(split.inputs[1]),
self.ref(split.inputs[2]),
)
elif fn.value == P.unsafe_static_cast:
self.add_instr(
"unsafe_static_cast",
self.ref(split.inputs[1]),
self.ref(split.inputs[2]),
)
elif fn.value == P.env_getitem:
self.add_instr(
"env_getitem",
self.ref(split.inputs[1]),
split.inputs[2].value,
self.ref(split.inputs[3]),
)
elif fn.value == P.env_setitem:
self.add_instr(
"env_setitem",
self.ref(split.inputs[1]),
split.inputs[2].value,
self.ref(split.inputs[3]),
)
elif fn.value == P.env_add: # pragma: no cover
raise RuntimeError("apparently no model requires this")
self.add_instr(
"env_add",
self.ref(split.inputs[1]),
self.ref(split.inputs[2]),
)
else:
raise AssertionError(
    f"Unknown special function {fn.value}"
)
else:
# ensure the function and arguments are available.
self.ref(fn)
for i in split.inputs[1:]:
self.ref(i)
# make references to the arguments
for i in reversed(split.inputs[1:]):
self.dup(i)
if split is graph.output:
self.add_instr(
"tailcall",
self.ref(fn),
self.height,
len(split.inputs[1:]),
)
# execution stops here
break
else:
self.add_instr("call", self.ref(fn))
self.ret(len(split.inputs) - 1)
self.push(split)
need_stack = self.max_height - param_height
if need_stack > 0:
self.instrs.insert(0, ("pad_stack", need_stack))
res = self.instrs
self._reset()
return res
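# Heavily simplified sketch (not the real FinalVM): a stack machine that
# understands a few of the instruction names emitted above. As in `ref()`,
# operands are negative offsets from the top of the stack.
def run_instrs(instrs, args):
    stack = list(args)
    for instr, *ops in instrs:
        if instr == "push":
            stack.append(ops[0])
        elif instr == "dup":
            stack.append(stack[ops[0]])
        elif instr == "tuple":
            stack.append(tuple(stack[r] for r in ops))
        elif instr == "return":
            return stack[ops[0]]
        else:
            raise AssertionError(f"Unknown instruction {instr}")

# Build the tuple (x, 10) from a single argument x = 3.
program = [("push", 10), ("tuple", -2, -1), ("return", -1)]
assert run_instrs(program, [3]) == (3, 10)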
Python | def link(self):
"""Link instructions from multiple graphs together."""
for i in range(len(self.instrs)):
instr = self.instrs[i]
if instr[0] == "push_graph":
self.instrs[i] = ("push", self.mapping[instr[1]]) | def link(self):
"""Link instructions from multiple graphs together."""
for i in range(len(self.instrs)):
instr = self.instrs[i]
if instr[0] == "push_graph":
self.instrs[i] = ("push", self.mapping[instr[1]]) |
Python | def compile_and_link(self, graph):
"""Convert all graphs to unlinked instructions and map them."""
self._reset()
graph = wrap_primitives(graph)
graph = convert_grad(graph)
self.compile(graph)
graphs = graph.manager.graphs
for g in graphs - {graph}:
self.compile(g)
self.link()
res = FinalVM(self.instrs, self.transform.backend)
self._reset()
return res
Python | def group_nodes(root, manager):
"""Group together all nodes that could be merged.
Some nodes in some groups may end up being unmergeable.
"""
hashes = {}
groups = defaultdict(list)
manager.add_graph(root)
for g in manager.graphs:
for node in toposort(g.return_, succ_incoming):
if node in hashes:
continue
if node.is_constant():
h = hash((node.value, node.abstract))
elif node.is_apply():
h = hash(tuple(hashes[inp] for inp in node.inputs))
elif node.is_parameter():
h = hash(node)
else: # pragma: no cover
raise TypeError(f"Unknown node type: {node}")
hashes[node] = h
groups[h, node.graph].append(node)
return groups
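# Standalone sketch of grouping by structural hash, with nested tuples such as
# ("add", "x", 1) standing in for graph nodes.
from collections import defaultdict

def structural_hash(expr, cache):
    if expr in cache:
        return cache[expr]
    if isinstance(expr, tuple):
        h = hash(tuple(structural_hash(e, cache) for e in expr))
    else:
        h = hash(expr)
    cache[expr] = h
    return h

exprs = [("add", "x", 1), ("add", "x", 1), ("mul", "x", 2)]
groups = defaultdict(list)
cache = {}
for e in exprs:
    groups[structural_hash(e, cache)].append(e)
assert max(len(g) for g in groups.values()) == 2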
Python | async def infer_gather(
self,
engine,
input: lib.AbstractArray,
dim: xtype.UInt[64],
index: lib.AbstractArray,
):
"""Infer the return type of primitive `gather`."""
return type(input)(
input.element, {SHAPE: index.xshape(), TYPE: input.xtype()}
)
Python | def bprop_gather(x, dim, index, out, dout):
"""Backpropagator for primitive `gather`."""
z = zeros_like(x)
z = scatter_add(z, dim, index, dout)
return (z, zeros_like(dim), zeros_like(index))
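# Standalone NumPy sketch (not Myia code) of the gather / scatter duality the
# backpropagator above relies on: values gathered along an axis are written
# back to the same positions of the gradient buffer. Indices here are unique,
# so put_along_axis is enough; the general case needs accumulation (scatter_add).
import numpy as np
x = np.array([[1.0, 2.0], [3.0, 4.0]])
index = np.array([[1, 0]])
out = np.take_along_axis(x, index, axis=0)      # [[3., 2.]]
dout = np.ones_like(out)
dx = np.zeros_like(x)
np.put_along_axis(dx, index, dout, axis=0)      # [[0., 1.], [1., 0.]]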
Python | async def infer_concat(self, engine, x, dim):
"""Infer the return type of primitive `concat`."""
dim_v = dim.xvalue()
new_dim_len = sum([e.xshape()[dim_v] for e in x.elements])
shp_0 = x.elements[0].xshape()
assert all(len(e.xshape()) == len(shp_0) for e in x.elements)
for d in range(len(shp_0)):
if d != dim_v:
assert all(e.xshape()[d] == shp_0[d] for e in x.elements)
shp_f = shp_0[:dim_v] + (new_dim_len,) + shp_0[dim_v + 1 :]
return type(x.elements[0])(
x.elements[0].element, {SHAPE: shp_f, TYPE: x.elements[0].xtype()}
)
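# Standalone check with made-up shapes of the concat rule above: sizes along
# `dim` add up and every other dimension must match.
import numpy as np
a, b = np.ones((2, 3)), np.ones((4, 3))
assert np.concatenate((a, b), axis=0).shape == (6, 3)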
Python | async def _sect_dim(info, x_ref, dim_ref):
"""Returns shape of arrays along a dimension."""
x = await x_ref.get()
dim = build_value(await dim_ref.get())
sections = ()
for _x in x.elements:
sections = sections + (_x.xshape()[dim],)
return Constant(sections)
Python | def bprop_concat(x, dim, out, dout):
"""Backpropagator for primitive `concat`."""
_sections = _sect_dim(x, dim)
x_grad = split(dout, _sections, dim)
return (x_grad, zeros_like(dim))
Python | def relay_distribute(c, array, shape):
"""Implementation of distribute for Relay."""
assert shape.is_constant(tuple)
# Make sure shape is a tuple of builtin Python integers.
relay_shape = tuple(int(dim) for dim in shape.value)
return relay.op.broadcast_to(c.ref(array), relay_shape)
Python | def relay_reshape(c, v, shp):
"""Implementation of reshape for Relay."""
nv = c.ref(v)
assert shp.is_constant(tuple)
trim = False
if shp.value == ():
shp = (1,)
trim = True
else:
shp = shp.value
res = relay.op.reshape(nv, newshape=shp)
if trim:
res = relay.op.take(res, relay.const(0), mode="fast")
return res
Python | def relay_array_map(c, fn, *array):
"""Implementation of array_map for Relay."""
assert fn.is_constant(Primitive)
fn = fn.value
if fn is P.switch:
rfn = relay.where
else:
rfn = SIMPLE_MAP[fn]
return rfn(*[c.ref(a) for a in array])
Python | def relay_array_reduce(c, fn, array, shape):
"""Implementation of array_reduce for Relay."""
assert fn.is_constant(Primitive)
assert shape.is_constant(tuple)
fn = fn.value
tshp = shape.value
ary = c.ref(array)
if fn == P.scalar_add:
ashp = ashape(array)
if len(tshp) < len(ashp):
ts = (1,) * (len(ashp) - len(tshp)) + tshp
else:
ts = tshp
axis = tuple(i for i, t in enumerate(ts) if t == 1)
res = relay.op.sum(ary, axis=axis, keepdims=True)
if len(tshp) < len(ashp):
rtshp = tshp
if tshp == ():
tshp = (1,)
res = relay.op.reshape(res, newshape=tshp)
if rtshp == ():
res = relay.op.take(res, relay.const(0))
return res
elif fn == P.scalar_mul:
ashp = ashape(array)
if len(tshp) in (0, len(ashp)):
res = relay.op.prod(ary)
else:
raise NotImplementedError(
"We currently support only full product on an array."
)
return res
else:
raise NotImplementedError(f"reduce with {fn}") | def relay_array_reduce(c, fn, array, shape):
"""Implementation of array_reduce for Relay."""
assert fn.is_constant(Primitive)
assert shape.is_constant(tuple)
fn = fn.value
tshp = shape.value
ary = c.ref(array)
if fn == P.scalar_add:
ashp = ashape(array)
if len(tshp) < len(ashp):
ts = (1,) * (len(ashp) - len(tshp)) + tshp
else:
ts = tshp
axis = tuple(i for i, t in enumerate(ts) if t == 1)
res = relay.op.sum(ary, axis=axis, keepdims=True)
if len(tshp) < len(ashp):
rtshp = tshp
if tshp == ():
tshp = (1,)
res = relay.op.reshape(res, newshape=tshp)
if rtshp == ():
res = relay.op.take(res, relay.const(0))
return res
elif fn == P.scalar_mul:
ashp = ashape(array)
if len(tshp) in (0, len(ashp)):
res = relay.op.prod(ary)
else:
raise NotImplementedError(
"We currently support only full product on an array."
)
return res
else:
raise NotImplementedError(f"reduce with {fn}") |
Python | def relay_casttag(c, x, tag):
"""Implementation of casttag for Relay."""
assert tag.is_constant(int)
rtag = get_union_ctr(tag.value, x.abstract.options.get(tag.value))
v = relay.Var("v")
clause = adt.Clause(adt.PatternConstructor(rtag, [adt.PatternVar(v)]), v)
return adt.Match(c.ref(x), [clause], complete=False)
Python | def relay_hastag(c, x, tag):
"""Implementation of hastag for Relay."""
assert tag.is_constant(int)
rtag = get_union_ctr(tag.value, x.abstract.options.get(tag.value))
t_clause = adt.Clause(
adt.PatternConstructor(rtag, [adt.PatternWildcard()]), relay.const(True)
)
f_clause = adt.Clause(adt.PatternWildcard(), relay.const(False))
return adt.Match(c.ref(x), [t_clause, f_clause])
Python | def relay_tagged(c, x, tag):
"""Implementation of tagged for Relay."""
assert tag.is_constant(int)
rtag = get_union_ctr(tag.value, None)
return rtag(c.ref(x))
Python | def relay_unsafe_static_cast(c, val, ty):
"""Implementation of unsafe_static_cast for Relay."""
assert ty.is_constant(AbstractTaggedUnion)
assert isinstance(val.abstract, AbstractTaggedUnion)
return c.ref(val)
Python | def relay_array_getitem(c, a, start, stop, strides):
"""Implementation of array_getitem for Relay."""
assert start.is_constant(tuple)
assert stop.is_constant(tuple)
assert strides.is_constant(tuple)
return relay.op.transform.strided_slice(
c.ref(a), start.value, stop.value, strides.value
)
Python | def relay_argmax(c, v, dims):
"""Implementation of argmax for Relay."""
v = c.ref(v)
assert dims.is_constant(tuple)
return relay.cast(relay.argmax(v, axis=dims.value, keepdims=True), "int64")
Python | def relay_conv_transpose2d(
c, input, weight, stride, padding, output_padding, groups, dilation
):
"""Implement conv2d_transpose using 10 relay calls including conv2d.
Support all values for groups, dilation, strides, padding and
output padding.
Based on Theano implementation (2020/04/14):
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py#L2927
Requires the relay.nn.dilate operation in the TVM Relay backend.
"""
assert stride.is_constant(tuple)
assert padding.is_constant(tuple)
assert output_padding.is_constant(tuple)
assert dilation.is_constant(tuple)
assert groups.is_constant(int)
data_shape = input.abstract.xshape()
kern_shape = weight.abstract.xshape()
n, _, h_in, w_in = data_shape
filter_h, filter_w = kern_shape[2:]
strides = stride.value
padding = padding.value
dilation = dilation.value
output_padding = output_padding.value
groups = groups.value
data = c.ref(input)
weight = c.ref(weight)
h_out = (
(h_in - 1) * strides[0]
- 2 * padding[0]
+ dilation[0] * (filter_h - 1)
+ output_padding[0]
+ 1
)
w_out = (
(w_in - 1) * strides[1]
- 2 * padding[1]
+ dilation[1] * (filter_w - 1)
+ output_padding[1]
+ 1
)
data_dilated = relay.nn.dilate(data, (1, 1) + strides)
data_padded = relay.nn.pad(
data_dilated,
((0, 0), (0, 0), (0, output_padding[0]), (0, output_padding[1]),),
)
# Pre-process kernel,
# from (m0, m1, m2, m3) to (m1 * g, m0 // g, m2, m3).
mshp0 = kern_shape[0] // groups
c_out = kern_shape[1] * groups
kern = relay.reshape(weight, (groups, mshp0) + kern_shape[1:])
# => (g, m0 // g, m1, m2, m3)
kern = relay.op.transpose(kern, axes=(1, 0, 2, 3, 4))
# => (m0 // g, g, m1, m2, m3)
kern = relay.reshape(kern, (mshp0, c_out, kern_shape[-2], kern_shape[-1]))
# => (m0 // g, m1 * g, m2, m3)
kern = relay.op.transpose(kern, (1, 0, 2, 3))
# => (m1 * g, m0 // g, m2, m3)
# Kernel 2 latest dimensions must be flipped
kern = relay.op.transform.reverse(kern, 2)
kern = relay.op.transform.reverse(kern, 3)
# End pre-processing kernel.
img = relay.nn.conv2d(
data_padded,
kern,
groups=groups,
channels=c_out,
padding=[(kern_shape[2 + i] - 1) * dilation[i] for i in range(2)],
dilation=dilation,
)
if any(p != 0 for p in padding):
img = relay.op.transform.strided_slice(
data=img,
begin=[0, 0, padding[0], padding[1]],
end=[n + 1, c_out + 1, h_out + padding[0], w_out + padding[1]],
)
return img | def relay_conv_transpose2d(
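
# Quick sanity check of the h_out / w_out formula used above, with made-up
# sizes and plain Python (no Relay needed): a 4x4 input, 3x3 kernel, stride 2,
# padding 1, output_padding 1, dilation 1.
h_in, filter_h = 4, 3
stride_v, pad, out_pad, dil = 2, 1, 1, 1
h_out = (h_in - 1) * stride_v - 2 * pad + dil * (filter_h - 1) + out_pad + 1
assert h_out == 8  # matches the usual conv_transpose2d output-size rule
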
def relay_random_initialize(c, ref_seed):
    """Create a random state for the Philox2x32 RNG.

    The state is a pair (key, counter). The key is the given seed (or a
    default value if the seed is None); the counter starts at 0 and is
    incremented after each generation batch.
    """
    assert ref_seed.is_constant(type(None)) or ref_seed.is_constant(int)
    seed = ref_seed.value
    key = relay.const(seed, "uint32")
    counter = relay.const(0, "uint32")
    rstate = relay.Tuple((key, counter))
    return rstate

"""Create a random state for Philox2x32 RNG.
State is a couple (key, counter).
key is given seed, or a default value if seed is None.
counter starts with 0 and is incremented after each generation batch.
"""
assert ref_seed.is_constant(type(None)) or ref_seed.is_constant(int)
seed = ref_seed.value
key = relay.const(seed, "uint32")
counter = relay.const(0, "uint32")
rstate = relay.Tuple((key, counter))
return rstate |
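
# Plain-Python mirror of the (key, counter) layout built above; the values
# are illustrative, the real state holds Relay uint32 constants.
def init_state(seed):
    return (seed, 0)

assert init_state(42) == (42, 0)
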
def relay_random_uint32(c, ref_rstate, ref_shape):
    """Generate a random tensor using the Philox2x32 RNG."""
    assert ref_shape.is_constant(tuple)
    shape = ref_shape.value
    relay_state = c.ref(ref_rstate)
    # Compute the output size.
    output_size = 1
    for dim in shape:
        output_size *= dim
    # Generate random uint32 values.
    key = relay.TupleGetItem(relay_state, 0)
    counter = relay.TupleGetItem(relay_state, 1)
    impl = relay_philox.Philox2x32(output_size)
    ctr = impl.generate_relay_counter_array(counter)
    random = impl.philox_2x(ctr, key)
    # Reshape the vector to the expected shape.
    if shape:
        random = relay.op.reshape(random, shape)
    else:
        # Convert the 1-element vector to a scalar.
        random = relay.op.take(random, relay.const(0), mode="fast")
    # Generate the next state: same key, counter + 1.
    next_rstate = relay.Tuple(
        (key, relay.add(counter, relay.const(1, "uint32")))
    )
    # Return the next state and the random tensor.
    return relay.Tuple((next_rstate, random))

"""Generate a random tensor using Philox2x32 RNG."""
assert ref_shape.is_constant(tuple)
shape = ref_shape.value
relay_state = c.ref(ref_rstate)
# Compute output size.
output_size = 1
for dim in shape:
output_size *= dim
# Generate random uint32 values.
key = relay.TupleGetItem(relay_state, 0)
counter = relay.TupleGetItem(relay_state, 1)
impl = relay_philox.Philox2x32(output_size)
ctr = impl.generate_relay_counter_array(counter)
random = impl.philox_2x(ctr, key)
# Reshape vector to expected shape.
if shape:
# Reshape vector to output shape.
random = relay.op.reshape(random, shape)
else:
# Convert 1-element vector to scalar
random = relay.op.take(random, relay.const(0), mode="fast")
# Generate next state: same key, counter + 1
next_rstate = relay.Tuple(
(key, relay.add(counter, relay.const(1, "uint32")))
)
# Return next state and random tensor.
return relay.Tuple((next_rstate, random)) |
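
# Sketch of how the state is threaded between calls (illustrative values and
# helper names; the real function returns Relay expressions, not Python ints).
from functools import reduce
import operator

def next_batch(state, shape):
    key, counter = state
    output_size = reduce(operator.mul, shape, 1)  # same product as the loop above
    return (key, counter + 1), output_size        # same key, counter + 1

state = (42, 0)
state, n = next_batch(state, (2, 3))
assert state == (42, 1) and n == 6
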
def run(self, graph, context, target, exec_kind):
    """Convert the graph into a relay callable."""
    mng = manage(graph)
    graph, handles_params = return_handles(graph)
    mng.keep_roots(graph)
    self.module = tvm.IRModule({})
    self.types = TypeHelper()
    self.types.initialize(self.module, mng)
    self.make_const = RelayConstantConverter(context, self.types)
    self.universe_helper = None
    self.i = 0
    # Analyze and create a global union type of all the possible types
    # and then use it for all union values.
    function_map = {}
    self.node_map = {}
    self.graph_map = {}
    for g in mng.graphs:
        if g.parent is None:
            if g is graph:
                self.graph_map[g] = relay.GlobalVar("main")
            else:
                # Mangle user names.
                name = "_" + g.debug.debug_name
                self.graph_map[g] = relay.GlobalVar(name)
    for g in self.graph_map.keys():
        function_map[self.graph_map[g]] = self.convert_func(g)
    add_functions(self.module, function_map)
    vm = relay.create_executor(
        mod=self.module, ctx=context, target=target, kind=exec_kind
    )
    res = vm.evaluate()
    fill_reverse_tag_map()
    res = handle_wrapper(res, handles_params)
    return res

"""Convert the graph into a relay callable."""
mng = manage(graph)
graph, handles_params = return_handles(graph)
mng.keep_roots(graph)
self.module = tvm.IRModule({})
self.types = TypeHelper()
self.types.initialize(self.module, mng)
self.make_const = RelayConstantConverter(context, self.types)
self.universe_helper = None
self.i = 0
# Analyze and create a global union type of all the possible types
# and then use it for all union values.
function_map = {}
self.node_map = {}
self.graph_map = {}
for g in mng.graphs:
if g.parent is None:
if g is graph:
self.graph_map[g] = relay.GlobalVar("main")
else:
# Mangle user names
name = "_" + g.debug.debug_name
self.graph_map[g] = relay.GlobalVar(name)
for g in self.graph_map.keys():
function_map[self.graph_map[g]] = self.convert_func(g)
add_functions(self.module, function_map)
vm = relay.create_executor(
mod=self.module, ctx=context, target=target, kind=exec_kind
)
res = vm.evaluate()
fill_reverse_tag_map()
res = handle_wrapper(res, handles_params)
return res |
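
# The two loops above follow a two-pass pattern: first give every top-level
# graph a global name, then convert the bodies, so graphs can refer to each
# other regardless of conversion order.  A minimal sketch with hypothetical
# FakeGraph / build_function_map names (not part of the real converter):
class FakeGraph:
    def __init__(self, name):
        self.name = name

def build_function_map(graphs, entry, convert_func):
    names = {}
    for g in graphs:                                    # pass 1: assign names
        names[g] = "main" if g is entry else "_" + g.name
    return {names[g]: convert_func(g) for g in graphs}  # pass 2: convert bodies

f, g = FakeGraph("f"), FakeGraph("g")
fmap = build_function_map([f, g], entry=f, convert_func=lambda gr: gr.name + "_body")
assert set(fmap) == {"main", "_g"}
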
def convert_scalar(self, v, t):
    """Convert the scalar to a TVM array."""
    return tvm.runtime.ndarray.array(
        getattr(np, type_to_np_dtype(t))(v), self.context
    )

"""Convert the scalar to a TVM array."""
return tvm.runtime.ndarray.array(
getattr(np, type_to_np_dtype(t))(v), self.context
) |
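
# What the getattr lookup above does: fetch the NumPy scalar type by name and
# build a typed value from it.  The dtype string is illustrative; type_to_np_dtype
# is assumed to return NumPy dtype names such as "float32".
import numpy as np

x = getattr(np, "float32")(3.5)  # same as np.float32(3.5)
assert isinstance(x, np.float32) and x.dtype == np.dtype("float32")
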
async def infer_dot(self, engine, a: AbstractArray, b: AbstractArray):
    """Infer the return type of primitive `dot`."""
    a_shp = a.xshape()
    b_shp = b.xshape()
    if len(a_shp) != 2 or len(b_shp) != 2:
        raise MyiaShapeError("dot needs matrix inputs")
    if (
        a_shp[1] != b_shp[0]
        and a_shp[1] is not ANYTHING
        and b_shp[0] is not ANYTHING
    ):
        raise MyiaShapeError(
            f"Incompatible shapes in dot: {a_shp} and {b_shp}"
        )
    engine.abstract_merge(a.element, b.element)
    c_shp = (a_shp[0], b_shp[1])
    if a.xtype() != b.xtype():
        raise MyiaTypeError(
            f"Expected array of type {a.xtype()} "
            f"to have the same type as array of type {b.xtype()}"
        )
    return type(a)(a.element, {SHAPE: c_shp, TYPE: a.xtype()})

"""Infer the return type of primitive `dot`."""
a_shp = a.xshape()
b_shp = b.xshape()
if len(a_shp) != 2 or len(b_shp) != 2:
raise MyiaShapeError("dot needs matrix inputs")
if (
a_shp[1] != b_shp[0]
and a_shp[1] is not ANYTHING
and b_shp[0] is not ANYTHING
):
raise MyiaShapeError(f"Incompatible shapes in dot: {a_shp} and {b_shp}")
engine.abstract_merge(a.element, b.element)
c_shp = (a_shp[0], b_shp[1])
if a.xtype() != b.xtype():
raise MyiaTypeError(
f"Expect array of type {a.xtype()} "
f"to have same type as array of type {b.xtype()}"
)
return type(a)(a.element, {SHAPE: c_shp, TYPE: a.xtype()}) |
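
# Worked example of the shape rule above: a (2, 3) matrix times a (3, 4)
# matrix yields a (2, 4) result, i.e. c_shp = (a_shp[0], b_shp[1]).
import numpy as np

a = np.ones((2, 3), dtype="float32")
b = np.ones((3, 4), dtype="float32")
assert np.dot(a, b).shape == (2, 4)
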
def to_numpy(self, x):
    """Convert torch Tensor x to numpy."""
    import torch

    if not isinstance(x, torch.Tensor):
        raise MyiaInputTypeError(f"Expected torch.Tensor but got {x}.")
    return x.detach().cpu().numpy()

"""Convert torch Tensor x to numpy."""
import torch
if not isinstance(x, torch.Tensor):
raise MyiaInputTypeError(f"Expected torch.Tensor but got {x}.")
return x.detach().cpu().numpy() |
def from_numpy(self, x):
    """Convert numpy array x to a torch Tensor."""
    import torch

    return torch.from_numpy(x)

"""Convert numpy array x to a torch Tensor."""
import torch
return torch.from_numpy(x) |
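
# Round-trip sketch for the two helpers above (requires torch and numpy).
# torch.from_numpy shares memory with the source array; .detach().cpu().numpy()
# goes back the other way.
import numpy as np
import torch

arr = np.arange(6, dtype="float32").reshape(2, 3)
t = torch.from_numpy(arr)
back = t.detach().cpu().numpy()
assert np.array_equal(arr, back)
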
def user_defined_version(self):
    """Return the user-defined version of this type.

    This uses the attribute types as defined by the user, rather than what
    is generated by the inferrer or other methods.

    The current default is to return self, in order to make it easier for
    the Myia hypermap mapping function to return a different type from its
    input (especially for pytorch modules and their contents).
    """
    return AbstractModule(
        self.tag,
        {attr: ANYTHING for attr in self.attributes},
        constructor=self.constructor,
    )

"""Return the user-defined version of this type.
This uses the attribute types as defined by the user, rather than what
is generated by the inferrer or other methods.
Current default is to return self in order to make it easier for Myia
hypermap mapping function to return a different type from its input
(especially for pytorch modules and their contents).
"""
return AbstractModule(
self.tag,
{attr: ANYTHING for attr in self.attributes},
constructor=self.constructor,
) |
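
# Sketch of the widening this method performs: every attribute type is
# replaced by a wildcard so the hypermap can rebuild the module with new
# contents.  The attribute names and the ANYTHING stand-in are illustrative.
ANYTHING_SENTINEL = object()  # stand-in for myia's ANYTHING
attributes = {"weight": "AbstractArray[f32]", "bias": "AbstractArray[f32]"}
widened = {attr: ANYTHING_SENTINEL for attr in attributes}
assert set(widened) == set(attributes)
assert all(v is ANYTHING_SENTINEL for v in widened.values())
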