def gen_fv_extended(self, g, ng, node):
"""Generate sensitivities for free variables.
Note that the default gen_fv_extended does nothing, so this is
different behavior.
"""
with About(node.debug, self.relation):
        self.remap_node((g, node), g, node, ng, ng.apply())

def finalize_graph(self, g, ng):
"""Generate the output of the backprop graph.
* Sensitivities for all free variables are packed in an
EnvInstance using env_setitem.
* We return a tuple with fv sensitivities first, and then
all parameter sensitivities.
"""
fv_sens = Constant(newenv)
for fv in g.free_variables_extended:
sens = self.get(g, fv)
if sens.is_apply(zeros_like):
# Skip if there is no gradient
continue
fv_sens = ng.apply(
P.env_setitem,
fv_sens,
ng.apply(operations.embed, self.grad_fprop.get_jinv(fv)),
sens,
)
in_sens = [self.get(g, p) for p in g.parameters]
ng.output = ng.apply(P.make_tuple, fv_sens, *in_sens)
if len(ng.parameters) == 0:
# This can happen if the output is a constant. In that case we just
# add a dummy parameter to satisfy the backpropagator protocol.
with About(g.output.debug, "grad_sens"):
            ng.add_parameter()

def Jimpl(prim: Primitive, resources, node):
    """Implement J on a Primitive."""
    try:
        g = resources.grad_implementations[prim]
    except KeyError:  # pragma: no cover
        # No gradient implementation is registered for this primitive.
        g = None
    if g is None:
        raise InternalInferenceError(
            f"Missing a backpropagator for primitive '{prim}'", refs=[node]
        )
    return resources.convert(g, manage=False)

def _make_grad_transform(prim, fn, flags):
"""Given a function for the bprop, make the augmented function."""
from .pipeline import standard_parse
info = NamedDebugInfo(prim=prim, name=prim.name)
bprop = clone(standard_parse(fn))
bprop.flags.update(default_grad_flags)
bprop.debug.name = None
bprop.debug.about = About(info, "grad_bprop") # type: ignore
if bprop.output.match(_is_raise):
pass
elif bprop.output.match(_is_mktuple_resolve) or bprop.output.match(
_is_mktuple_direct
):
bprop.output = bprop.apply(
P.make_tuple, newenv, *bprop.output.inputs[1:]
)
else:
raise InternalInferenceError(
f"The backpropagator for {prim} is not defined properly. "
f"It should return a tuple literal.",
refs=[bprop.return_],
)
*args, out_param, dout = bprop.parameters
with About(info, "grad_fprop"):
outer = Graph()
outer.flags.update(default_grad_flags)
outer.flags.update(flags)
outer.transforms["primal"] = prim
outer.output = Constant(None)
mng = manage(bprop, outer)
transf_args = []
for p in args:
with About(p.debug, "grad_fprop"):
outer_p = outer.add_parameter()
with About(p.debug, "equiv"):
transf_p = outer.apply(P.Jinv, outer_p)
mng.replace(p, transf_p)
transf_args.append(transf_p)
with About(out_param.debug, "equiv"):
out_value = outer.apply(prim, *transf_args)
mng.replace(out_param, out_value)
with About(out_param.debug, "grad_sens"):
new_dout = bprop.add_parameter()
mng.replace(dout, new_dout)
# We remove all parameters except new_dout
bprop.parameters = [new_dout]
result = outer.apply(P.J, out_value)
outer.output = outer.apply(P.make_tuple, result, bprop)
    return clone(outer)

def wrap_grad_transform(prim):
"""Wrap the grad transform for prim."""
from .pipeline import standard_parse
def deco(fn):
g = standard_parse(fn)
for g2 in manage(g, weak=True).graphs:
name = short_labeler.name(g2)
name = name.replace("__fprop__", syms["grad_fprop"])
g2.debug.name = name.replace("__bprop__", syms["grad_bprop"])
g2.flags.update(default_grad_flags)
g.transforms["primal"] = prim
return g
    return deco

def bprop_to_grad_transform(prim, **flags):
"""Create the grad transform of a function from a bprop function."""
def deco(fn):
return _make_grad_transform(prim, fn, flags)
    return deco

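# A minimal usage sketch (not from the source): registering a bprop through
# the decorator above. The primitive name `scalar_mul` and the gradient
# formula are illustrative assumptions; per _make_grad_transform, the bprop
# receives the primal inputs, the primal output, and the output sensitivity,
# and must return a tuple literal of input sensitivities.
@bprop_to_grad_transform(P.scalar_mul)
def bprop_scalar_mul(x, y, out, dout):
    """Backpropagator for primitive `scalar_mul`."""
    return (dout * y, dout * x)
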
async def infer_conv2d(
self,
engine,
input: AbstractArray,
weight: AbstractArray,
stride: u64pair_typecheck,
padding: u64pair_typecheck,
dilation: u64pair_typecheck,
groups: xtype.UInt[64],
):
"""Infer the return type of primitive `conv2d`."""
# TODO: _shape_type should not allow float to be converted to uint
# TODO: "groups: UInt[64]" should not allow float to be converted to uint
h_in, w_in = input.xshape()[2:]
kernel_size = weight.xshape()[2:]
stride = tuple(
self.require_constant(e, argnum=f'"2:stride[{edx}]"')
for edx, e in enumerate(stride.elements)
)
padding = tuple(
self.require_constant(e, argnum=f'"3:padding[{edx}]"')
for edx, e in enumerate(padding.elements)
)
dilation = tuple(
self.require_constant(e, argnum=f'"4:dilation[{edx}]"')
for edx, e in enumerate(dilation.elements)
)
N = input.xshape()[0]
C_out = weight.xshape()[0]
# Based on formulae in shape section of:
# https://pytorch.org/docs/stable/nn.html#conv2d
H_out = (
(h_in + 2 * padding[0] - dilation[0] * (kernel_size[0] - 1) - 1)
// stride[0]
) + 1
W_out = (
(w_in + 2 * padding[1] - dilation[1] * (kernel_size[1] - 1) - 1)
// stride[1]
) + 1
out_shape = (N, C_out, int(H_out), int(W_out))
# Checks all elements of input have same xtype as all elements of weight
engine.check(AbstractScalar, input.element, weight.element)
    # ^ TODO: PyTorch also enforces this, but we might want to change it
    #   for mixed precision.
return type(weight)(
weight.element, {SHAPE: out_shape, TYPE: weight.xtype()}
    )

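# A quick sanity check of the output-shape formula used above (pure Python,
# no Myia dependencies): a 32-pixel dimension with kernel 3, padding 1,
# dilation 1 and stride 1 should be preserved.
h_in, k, pad, dil, stride = 32, 3, 1, 1, 1
h_out = (h_in + 2 * pad - dil * (k - 1) - 1) // stride + 1
assert h_out == 32
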
async def infer_universe_getitem(
self, engine, universe: xtype.UniverseType, handle: lib.AbstractHandle
):
"""Infer the return type of primitive `universe_getitem`."""
    return broaden(handle.element)

async def infer_partial(self, engine, fn, *args):
"""Infer the return type of primitive `partial`."""
fns = await fn.get()
assert isinstance(fns, Possibilities)
    return AbstractFunction(
        *[PartialApplication(fn, list(args)) for fn in fns]
    )

def pattern_replacer(*pattern, interest=False):
"""Create a PatternSubstitutionOptimization using this function."""
if len(pattern) == 2 and pattern[0] == "just":
pattern = pattern[1]
def deco(f):
return PatternSubstitutionOptimization(
pattern, f, name=f.__name__, interest=interest
)
    return deco

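# A hypothetical usage sketch: X and Y are assumed to be pattern variables
# (e.g. created with var()), and P.scalar_add an arbitrary primitive. The
# decorated function receives the resources, the matched node and the
# variable bindings, and returns a replacement node (or None to decline).
X = var()
Y = var()

@pattern_replacer(P.scalar_add, X, Y)
def example_replacer(resources, node, equiv):
    ...  # build and return a replacement for `node` using equiv[X], equiv[Y]
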
def register(self, interests, opt=None):
"""Register an optimizer for some interests."""
def do_register(opt):
ints = interests
if ints is None:
self._d.setdefault(None, []).append(opt)
return
if not isinstance(ints, tuple):
ints = (ints,)
for interest in ints:
assert isinstance(interest, Primitive) or (
interest in (Graph, Apply)
)
self._d.setdefault(interest, []).append(opt)
    # We could also return do_register here, to allow use as a decorator.
    do_register(opt)

def apply_opt(self, resources, mng, n):
    """Apply optimization passes according to the node map."""
loop = True
changes = False
while loop:
loop = False
for transformer in self.node_map.get(n):
args = dict(opt=transformer, node=n, manager=mng, profile=False)
with tracer("opt", **args) as tr:
tr.set_results(success=False, **args)
with About(n.debug, "opt", transformer.name):
new = transformer(resources, n)
if new is not None and new is not n:
tracer().emit_match(**args, new_node=new)
if new is True:
changes = True
continue
if new and new is not n:
mng.replace(n, new)
tracer().emit_success(**args, new_node=new)
tr.set_results(success=True, **args)
if resources and resources.live_inferrer:
resources.live_inferrer()
n = new
loop = True
changes = True
break
    return n, changes

async def infer_array_reduce(
self,
engine,
fn: AbstractFunctionBase,
a: AbstractArray,
shp: u64tup_typecheck,
):
"""Infer the return type of primitive `array_reduce`."""
shp_i = await force_pending(a.xshape())
shp_v = build_value(shp, default=ANYTHING)
if shp_v == ANYTHING:
raise AssertionError(
"We currently require knowing the shape for reduce."
)
# return (ANYTHING,) * (len(shp_i) - 1)
else:
delta = len(shp_i) - len(shp_v)
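        # Trailing dims must match unless one side is 1 or ANYTHING
        # (broadcasting semantics for the reduced axes).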
if delta < 0 or any(
1 != s1 != ANYTHING and 1 != s2 != ANYTHING and s1 != s2
for s1, s2 in zip(shp_i[delta:], shp_v)
):
raise MyiaShapeError(
f"Incompatible dims for reduce: {shp_i}, {shp_v}"
)
res = await engine.execute(fn, a.element, a.element)
    return type(a)(res, {SHAPE: shp_v, TYPE: a.xtype()})

async def infer_array_cast(
self, engine, a: lib.AbstractArray, typ: lib.AbstractType
):
"""Infer the return type of primitive `array_cast`."""
scal = typ.element
if not isinstance(scal, lib.AbstractScalar):
raise MyiaTypeError("array_cast must cast to a scalar dtype")
t = scal.xtype()
engine.check(xtype.Number, t)
e_values = {**a.element.values, lib.TYPE: t}
    return lib.AbstractArray(lib.AbstractScalar(e_values), a.values)

async def infer_take(
    self, engine, inp: AbstractArray, indices: AbstractArray
):
"""Infer the return type of primitive `take`."""
indices_shape = tuple(await force_pending(indices.xshape()))
inp_shape = tuple(await force_pending(inp.xshape()))
assert len(inp_shape) == 2
output_shape = indices_shape + (inp_shape[1],)
return AbstractArray(
inp.element,
{SHAPE: output_shape, TYPE: await force_pending(inp.xtype())},
    )

def bprop_take(inp, indices, out, dout):
"""Backpropagator for primitive `take`."""
return (
P.take_grad_inp(P.shape(inp)[0], indices, dout),
zeros_like(indices),
    )

def string_to_np_dtype(string):
    """Convert the given string to a NumPy dtype.

    Return None if parsing fails.
    """
    try:
        # If NumPy cannot parse the given string, it raises a TypeError.
        np_dtype = np.dtype(string)
    except TypeError:
        pass
else:
# We accept only:
# - booleans,
# - signed integers,
# - unsigned integers,
# - floating values
# - complex values.
if np_dtype.kind in "biufc":
return np_dtype.type
    return None

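# A minimal check of the helper above (assumes numpy is imported as np):
# only kinds in "biufc" are accepted, anything else yields None.
assert string_to_np_dtype("float32") is np.float32
assert string_to_np_dtype("str") is None  # kind 'U' is rejected
assert string_to_np_dtype("not a dtype") is None  # parse failure
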
async def to_scalar_type(info, data):
    """Convert the given data to an abstract scalar.

    Arguments:
        data: arbitrary data to convert

    Returns:
        An abstract scalar object if the data can be converted;
        otherwise an exception is raised.
    """
sync_data = await data.get()
# We expect either:
# - an abstract type containing an abstract scalar with scalar type
# - an abstract scalar containing a string to be parsed to a scalar type
if isinstance(sync_data, AbstractType):
abstract_scalar = sync_data.element
if isinstance(abstract_scalar, AbstractScalar):
xtype = await force_pending(abstract_scalar.xtype())
if inspect.isclass(xtype) and issubclass(xtype, Number):
return Constant(abstract_scalar)
elif isinstance(sync_data, AbstractScalar):
xtype = await force_pending(sync_data.xtype())
if xtype is String:
np_dtype = string_to_np_dtype(sync_data.xvalue())
if np_dtype:
myia_type = pytype_to_myiatype(np_dtype)
return Constant(
AbstractScalar({VALUE: ANYTHING, TYPE: myia_type})
)
# In any other case, we raise an exception.
raise MyiaTypeError("Unable to convert data to scalar type: %s" % sync_data) | async def to_scalar_type(info, data):
"""Convert given data to abstract scalar.
Arguments:
data: arbitrary data to convert
Returns:
an abstract scalar object if data can be converted,
otherwise raise an exception.
"""
sync_data = await data.get()
# We expect either:
# - an abstract type containing an abstract scalar with scalar type
# - an abstract scalar containing a string to be parsed to a scalar type
if isinstance(sync_data, AbstractType):
abstract_scalar = sync_data.element
if isinstance(abstract_scalar, AbstractScalar):
xtype = await force_pending(abstract_scalar.xtype())
if inspect.isclass(xtype) and issubclass(xtype, Number):
return Constant(abstract_scalar)
elif isinstance(sync_data, AbstractScalar):
xtype = await force_pending(sync_data.xtype())
if xtype is String:
np_dtype = string_to_np_dtype(sync_data.xvalue())
if np_dtype:
myia_type = pytype_to_myiatype(np_dtype)
return Constant(
AbstractScalar({VALUE: ANYTHING, TYPE: myia_type})
)
# In any other case, we raise an exception.
raise MyiaTypeError("Unable to convert data to scalar type: %s" % sync_data) |
def compute_finite_diff(self) -> Dict[str, float]:
"""Compute the finite differences gradient.
Returns:
A dictionary that maps d<outname>/d<argname> to the
gradient computed by finite difference with fn on args.
"""
results: Dict[str, float] = {}
for (under, over), ipath in gen_variants(self.args, self.wiggle, ()):
under = clean_args(under)
over = clean_args(over)
under_res = self.wrap(self.fn(*under))
over_res = self.wrap(self.fn(*over))
eps = self.epsilon
@smap.variant
def mkdiff(self, a: object, b):
return (b - a) / (2 * eps)
diff = mkdiff(under_res, over_res)
for opath in gen_paths(diff, ()):
self._set_result(
results, opath, ipath, resolve_path(diff, opath)
)
self.finite_diff = results
    return results

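# Central-difference sanity check (pure Python, illustrative only):
# mkdiff above computes (b - a) / (2 * eps); for f(x) = x**2 at x = 3,
# the estimate should be close to the exact derivative, 6.
eps = 1e-6
fd = ((3 + eps) ** 2 - (3 - eps) ** 2) / (2 * eps)
assert abs(fd - 6.0) < 1e-6
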
def compare(self) -> Dict[str, Dict]:
"""Compare the exact gradients to the estimated ones.
Returns:
A dictionary that maps d<outname>/d<argname> to a dictionary
that contains both gradients and a boolean 'match' field.
"""
exact = self.compute_exact()
fin = self.compute_finite_diff()
results = {}
rel = self.rel_error
for k in exact:
e = exact[k]
f = fin[k]
if e is None:
match = f == 0
elif e == f:
match = True
else:
threshold = max(abs(rel * e), abs(rel * f))
match = bool(abs(e - f) <= threshold)
results[k] = dict(exact=e, difference=f, match=match)
    return results

def assert_match(self):
"""Assert that the exact gradients match the estimated ones."""
results = self.compare()
failed = False
argspec = [
f"{name}={arg}" for name, arg in zip(self.argnames, self.args)
]
print(f"In: {', '.join(argspec)}")
outspec = [
f"{name}={arg}" for name, arg in zip(self.outnames, self.out)
]
print(f"Out: {', '.join(outspec)}")
for path, data in results.items():
if data["match"]:
print(f"{path} OK: == {data['exact']}")
else:
failed = True
print(
f"{path} MISMATCH:"
f" {data['exact']} != {data['difference']}"
f" (exact / finite diff)"
)
if failed:
raise Exception("Gradients do not match.") | def assert_match(self):
"""Assert that the exact gradients match the estimated ones."""
results = self.compare()
failed = False
argspec = [
f"{name}={arg}" for name, arg in zip(self.argnames, self.args)
]
print(f"In: {', '.join(argspec)}")
outspec = [
f"{name}={arg}" for name, arg in zip(self.outnames, self.out)
]
print(f"Out: {', '.join(outspec)}")
for path, data in results.items():
if data["match"]:
print(f"{path} OK: == {data['exact']}")
else:
failed = True
print(
f"{path} MISMATCH:"
f" {data['exact']} != {data['difference']}"
f" (exact / finite diff)"
)
if failed:
raise Exception("Gradients do not match.") |
def __fprop__switch(jcond, jtb, jfb):
"""Backpropagator for primitive `switch`."""
cond = Jinv(jcond)
rval = operations.switch(cond, jtb, jfb)
def __bprop__switch(dout):
tb = Jinv(jtb)
fb = Jinv(jfb)
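        # Route dout to whichever branch was selected; the other branch
        # receives a zero sensitivity. The leading newenv is the (empty)
        # free-variable environment.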
return (
newenv,
zeros_like(cond),
operations.switch(cond, dout, zeros_like(fb)),
operations.switch(cond, zeros_like(tb), dout),
)
    return rval, __bprop__switch

async def infer_split(self, engine, x, sections, dim):
"""Infer the return type of primitive `split`."""
sections_v = [e.xvalue() for e in sections.elements]
x_shp_v = x.xshape()
dim_v = dim.xvalue()
shp_r = ()
for s in sections_v:
shp_r = shp_r + (x_shp_v[:dim_v] + (s,) + x_shp_v[dim_v + 1 :],)
return AbstractTuple(
[
type(x)(x.element, {SHAPE: out_shape, TYPE: x.xtype()})
for out_shape in shp_r
]
    )

async def infer_transpose(
self, engine, a: AbstractArray, permutation: u64tup_typecheck
):
"""Infer the return type of primitive `transpose`."""
perm = build_value(permutation, default=ANYTHING)
if perm == ANYTHING:
shp = (ANYTHING,) * len(permutation.elements)
else:
a_shp = await force_pending(a.xshape())
        if sorted(perm) != list(range(len(a_shp))):
raise MyiaShapeError(
"The second argument of transpose must be a permutation of"
" all of the array's axes."
)
shp = tuple(a_shp[i] for i in perm)
    return type(a)(a.element, {SHAPE: shp, TYPE: a.xtype()})

def import_mod(pkg):
"""Helper function for simple frontends.
This will return a callable that will load a module.
"""
def loader():
importlib.import_module(pkg)
    return loader

def collect_frontend_plugins():
"""Collect frontend plugins.
Look for entry points in namespace "myia.frontend".
Each entry point must be a frontend module.
:return: a dictionary mapping a frontend name to a loader function
to import frontend module.
"""
return {
entry_point.name: import_mod(entry_point.module_name)
for entry_point in pkg_resources.iter_entry_points("myia.frontend")
    }

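# A hypothetical sketch of the plugin side: a frontend package exposes
# itself through the "myia.frontend" entry-point namespace, e.g. in its
# setup.py (the package name `myia_myfrontend` is an illustrative
# assumption, not from the source):
#
# setup(
#     ...,
#     entry_points={
#         "myia.frontend": ["myfrontend = myia_myfrontend"],
#     },
# )
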
def constvar(cls=object):
"""Return a variable matching a Constant of the given type."""
def _is_c(n):
return n.is_constant(cls)
    return var(_is_c)

def pop(self):
"""Remove the top element of the stack and return it."""
curr, prev = self.var.get()
assert prev is not None
self.var.set(prev)
    return curr

def debug_name(self):
    """Return the name, creating a fresh one if needed."""
if self.name:
return self.name
prefix = ""
if self.obj is not None:
prefix = self.obj.__class__.__name__.lower()
self.name = f"_{prefix}{self.id}"
    return self.name

def find(self, prop, skip=set()):
"""Find a property in self or in self.about.debug."""
curr = self
while curr is not None:
rel = curr.about and curr.about.relation
if hasattr(curr, prop) and rel not in skip:
return getattr(curr, prop)
if not curr.about:
break
curr = curr.about.debug
    return None

async def infer_tuple_setitem(
self,
engine,
arg: lib.AbstractTuple,
idx: xtype.Int[64],
value: lib.AbstractValue,
):
"""Infer the return type of primitive `tuple_setitem`."""
nelems = len(arg.elements)
    idx_v = self.require_constant(idx, argnum=2, range=range(-nelems, nelems))
    if idx_v < 0:
        # Normalize negative indices so the slices below are consistent
        # (e.g. idx_v == -1 would otherwise wrap incorrectly).
        idx_v += nelems
    elts = arg.elements
    new_elts = tuple([*elts[:idx_v], value, *elts[idx_v + 1 :]])
    return lib.AbstractTuple(new_elts)

async def resolve(info, r_data, r_item):
"""Perform static name resolution on a Namespace."""
data_v, item_v = await info.build_all(r_data, r_item)
if not isinstance(data_v, Namespace): # pragma: no cover
        raise MyiaTypeError(
            f"data argument to resolve must be Namespace, not {data_v}"
        )
if not isinstance(item_v, str): # pragma: no cover
        raise MyiaTypeError(
            f"item argument to resolve must be a string, not {item_v}."
        )
try:
resolved = data_v[item_v]
except NameError:
raise MyiaNameError(f"Cannot resolve name '{item_v}'")
    return Constant(resolved)

def validate_annotation(annotation, abstract):
    """Check that abstract is allowed by the given annotation."""
try:
annotation_merge(
type_to_abstract(annotation),
abstract,
forced=True,
bind_pending=True,
)
except MyiaTypeError as exc:
raise AnnotationMismatchError(f"{type(exc).__name__}: {exc.message}")
    return abstract

def reset(self):
"""Reset all of the InferenceEngine's caches."""
self.cache = EvaluationCache(
loop=self.loop,
keycalc=self.compute_ref,
keytransform=self.get_actual_ref,
)
self.reference_map = {}
self.new_reference_map = {}
    self.constructors = {}

async def infer_function(self, fn, argspec, outspec=None):
"""Infer a function call on the given argspec/outspec."""
if not isinstance(fn, Function):
fn = to_abstract(fn).get_unique()
vfn = AbstractFunctionUnique(argspec, outspec)
out = await execute_inferrers(
self,
[self.get_inferrer_for(fn)],
VirtualReference(vfn.output),
[VirtualReference(arg) for arg in vfn.args],
)
if outspec is not None:
self.abstract_merge(out, vfn.output)
    return out

def run(self, graph, *, argspec, outspec=None):
"""Run the inferrer on a graph given initial values.
Arguments:
graph: The graph to analyze.
argspec: The arguments. Must be a tuple of AbstractValue.
outspec (optional): Expected inference result. If provided,
inference result will be checked against it.
"""
self.mng.add_graph(graph)
empty_context = self.context_class.empty()
root_context = empty_context.add(graph, argspec)
out = self.run_coroutine(self.infer_function(graph, argspec, outspec))
out = concretize_abstract(out)
    return out, root_context

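# A hypothetical driving sketch, assuming `engine` is an InferenceEngine and
# `g` a parsed Graph with a single scalar parameter; argspec entries are
# AbstractValue instances describing each parameter.
argspec = (AbstractScalar({VALUE: ANYTHING, TYPE: xtype.Int[64]}),)
out, root_ctx = engine.run(g, argspec=argspec)
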
def ref(self, node, context):
"""Return a Reference to the node in the given context."""
if node.abstract is not None:
return Reference(self, node, CONTEXTLESS)
if context is CONTEXTLESS:
return Reference(self, node, CONTEXTLESS)
if node.is_constant_graph():
if node.value.abstract is not None:
with untested():
return Reference(self, node, CONTEXTLESS)
graph = node.value.parent
else:
graph = node.graph
new_context = context.filter(graph)
ref = Reference(self, node, new_context)
if new_context.graph is not graph:
raise InternalInferenceError(
f"Trying to access node '{ref.node}' of function '{graph}'"
f" from function '{context.graph}', but it is not visible"
" in that scope. This typically indicates either a bug"
" in a macro or a bug in Myia.",
refs=[ref],
)
    return ref

async def compute_ref(self, ref):
"""Compute the value associated to the Reference."""
node = ref.node
tracer().emit("request_ref", engine=self, reference=ref)
inferred = ref.node.abstract
if inferred is not None:
result = inferred
elif node.is_constant():
result = await self.infer_constant(ref)
elif node.is_apply():
result = await self.infer_apply(ref)
else: # pragma: no cover
# The check in the `ref` method should catch most of the situations
# that would otherwise end up here, so this might be inaccessible.
raise InternalInferenceError(
f"Type information for {ref.node} is unavailable."
f" This indicates either a bug in a macro or a bug in Myia.",
refs=[ref],
)
if ref.node.annotation:
result = validate_annotation(ref.node.annotation, result)
tracer().emit("compute_ref", engine=self, reference=ref, result=result)
    return result

async def reroute(self, orig, new):
"""Set the inference result for orig to the result for new.
This sets an entry in reference_map from orig to new.
"""
if not new.node.debug.about:
# This will link the old node's debug info to the new node, if
# necessary.
new.node.debug.about = About(orig.node.debug, "reroute")
self.reference_map[orig] = self.new_reference_map[orig] = new
    return await self.get_inferred(new)

def run_coroutine(self, coro):
"""Run an async function using this inferrer's loop."""
errs_before = len(self.errors)
try:
fut = self.loop.schedule(coro)
self.loop.run_forever()
self.errors.extend(self.loop.collect_errors())
new_errors = self.errors[errs_before:]
for err in new_errors:
err.engine = self
if new_errors:
new_errors.sort(key=lambda err: -getattr(err, "priority", 1000))
raise new_errors[0]
return fut.result()
finally:
for task in asyncio.all_tasks(self.loop):
            task._log_destroy_pending = False

async def infer_apply(self, ref):
"""Infer the type of a ref of an Apply node."""
ctx = ref.context
n_fn, *n_args = ref.node.inputs
# We await on the function node to get the inferrer
fn_ref = self.ref(n_fn, ctx)
fn = await fn_ref.get()
argrefs = [self.ref(node, ctx) for node in n_args]
if isinstance(fn, AbstractFunction):
infs = [self.get_inferrer_for(poss) for poss in await fn.get()]
return await self.loop.schedule(
execute_inferrers(self, infs, ref, argrefs),
context_map={infer_trace: {**infer_trace.get(), ctx: ref}},
ref=ref,
)
elif isinstance(fn, AbstractFunctionUnique):
infs = [self.get_inferrer_for(fn)]
return await self.loop.schedule(
execute_inferrers(self, infs, ref, argrefs),
context_map={infer_trace: {**infer_trace.get(), ctx: ref}},
ref=ref,
)
else:
g = ref.node.graph
newcall = g.apply(operations.call_object, n_fn, *n_args)
        return await self.reroute(ref, self.ref(newcall, ctx))

async def infer_constant(self, ctref):
"""Infer the type of a ref of a Constant node."""
cvt = self.resources.convert(ctref.node.value)
if cvt is ctref.node.value:
return to_abstract(
ctref.node.value,
context=ctref.context,
node=ctref.node,
loop=self.loop,
)
else:
# The current Constant's value is not compatible with the
# pipeline so we redirect to a correct one.
newct = Constant(cvt)
new = self.ref(newct, ctref.context)
        return await self.reroute(ctref, new)

def abstract_merge(self, *values):
"""Merge a list of AbstractValues together."""
from .amerge import amerge_engine
token = amerge_engine.set(self)
try:
rval = reduce(amerge, values)
finally:
amerge_engine.reset(token)
    return rval

def check_predicate(self, predicate, x):
    """Return whether the predicate applies to x.

A predicate can be:
* A Myia type (xtype.Int[64] etc.)
* A Python class
* A callable that returns a boolean
"""
if isinstance(predicate, xtype.TypeMeta):
if isinstance(x, AbstractValue):
x = x.xtype()
if x is None:
return False
return isinstance(x, type) and issubclass(x, predicate)
elif isinstance(predicate, type):
return isinstance(x, predicate)
elif callable(predicate):
return predicate(self, x)
else:
        raise ValueError(predicate)

def check(self, predicate, *values):
"""Merge all values and check that the predicate applies.
Some values may be Pending, in which case a check will be
scheduled when they are finally resolved.
"""
for value in values:
if isinstance(value, Pending):
value.add_done_callback(
lambda fut: self.assert_predicate(predicate, fut.result())
)
else:
self.assert_predicate(predicate, value)
    return self.abstract_merge(*values)

def concretize_cache(self):
"""Complete the engine's caches with concretized contexts."""
concretize_cache(self.cache.new, dest=self.cache.cache)
self.cache.new = {}
concretize_cache(self.new_reference_map, dest=self.reference_map)
    self.new_reference_map = {}

def run(self, nodes):
"""Infer the types of the given nodes."""
async def _run(todo):
for node in todo:
await self.get_inferred(self.ref(node, CONTEXTLESS))
self.reset()
todo = OrderedSet()
nodes = OrderedSet(nodes)
while nodes:
node = nodes.pop()
calls = OrderedSet(
[user for user, idx in self.mng.uses[node] if idx == 0]
)
for call in calls:
call.abstract = None
nodes.update(calls)
todo.add(node)
self.run_coroutine(_run(todo))
for ref, fut in self.cache.cache.items():
new_ref = self.get_actual_ref(ref)
if new_ref is not ref:
self.mng.replace(ref.node, new_ref.node)
ref = new_ref
result = fut.result()
result = self.fix_type(result)
        ref.node.abstract = concretize_abstract(result)

def nokw(self, args):
"""Assert that there are no keyword arguments."""
for arg in args:
if isinstance(arg, AbstractKeywordArgument):
raise MyiaTypeError("Keyword arguments are not allowed here") | def nokw(self, args):
"""Assert that there are no keyword arguments."""
for arg in args:
if isinstance(arg, AbstractKeywordArgument):
raise MyiaTypeError("Keyword arguments are not allowed here") |
async def infer(self, engine, *args):
"""Infer the abstract result given the abstract arguments."""
g = self.get_graph(engine, args)
nargs = len(g.parameters)
if len(args) != nargs:
raise type_error_nargs(self._graph, nargs, len(args))
# args were already normalized by run()
context = self.make_context(engine, args, normalize=False)
tracer().emit_infer_context(engine=engine, context=context)
# We associate each parameter of the Graph with its value for each
# property, in the context we built.
for p, arg in zip(g.parameters, context.argkey):
if p.annotation:
validate_annotation(p.annotation, arg)
ref = engine.ref(p, context)
engine.cache.set_value(ref, arg)
out = engine.ref(g.return_, context)
    return await engine.get_inferred(out)

def compute_bprop_type(orig_fn, args, out):
"""Compute the abstract type of the bprop for orig_fn."""
fn = AbstractFunction(orig_fn)
bparams = [sensitivity_transform(fn)]
bparams += [sensitivity_transform(a) for a in args]
bparams_final = AbstractTuple(bparams)
    return AbstractFunctionUnique(
        (sensitivity_transform(out),), bparams_final
    )

async def infer(self, engine, *args):
"""Infer the abstract result given the abstract arguments."""
infer_trace.set({**infer_trace.get(), self.prim: (self.prim, args)})
await self.checker.check(engine, args)
    return await self._infer(self, engine, *args)

def require_constant(self, a, *, argnum, range=None):
    """Return the constant associated with abstract argument a.

If a is not a constant, raises a MyiaTypeError.
Arguments:
a: Value to check for constantness
argnum (int): Which argument we are checking.
range (optional): A range or collection in which the argument
must lie.
"""
v = a.xvalue()
if v is ANYTHING:
raise MyiaTypeError(
f"Argument {argnum} to {self.prim} must be constant."
)
if range is not None and v not in range:
raise MyiaTypeError(
f"Argument {argnum} to {self.prim} is out of range."
f" It should lie in {range}"
)
    return v

def standard_prim(prim):
"""Decorator to define and register a StandardInferrer."""
def deco(fn):
if isinstance(fn, type):
return fn.partial()
else:
return StandardInferrer.partial(prim=prim, infer=fn)
    return deco

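# A usage sketch, assuming P.take is the primitive being described: the
# inferrers shown earlier in this section would be registered like this.
@standard_prim(P.take)
async def infer_take(self, engine, inp: AbstractArray, indices: AbstractArray):
    """Infer the return type of primitive `take`."""
    ...
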
def normalize_args_sync(self, args):
"""If infer_value is False, return broadened arguments."""
if not self.infer_value:
args = tuple(_broaden(a) for a in args)
    return args

async def infer(self, engine, *args):
"""Infer the abstract result given the abstract arguments."""
infer_trace.set({**infer_trace.get(), self.prim: (self.prim, args)})
if any(not isinstance(arg, AbstractScalar) for arg in args):
raise MyiaTypeError(f"Expected scalar as argument to {self.prim}")
ts = [arg.xtype() for arg in args]
outtype = await self.checker.check(engine, ts, uniform=True)
    return self.run_impl(engine, args, outtype)

def run_impl(self, engine, args, outtype):
    """Run the implementation on abstract data.

    If infer_value is False, this returns an AbstractScalar with value
    ANYTHING.

    Arguments:
        engine: The InferenceEngine
        args: The abstract arguments
        outtype: The output type to give to the result
    """
if not self.infer_value:
outval = ANYTHING
else:
values = [arg.xvalue() for arg in args]
if any(v is ANYTHING for v in values):
outval = ANYTHING
else:
outval = self.impl(*values)
    return AbstractScalar({VALUE: outval, TYPE: outtype})

async def execute_inferrers(engine, inferrers, outref, argrefs):
"""Execute a set of inferrers on a tuple of References.
The results of the inferrers will be bound together and an error will
be raised eventually if they cannot be merged.
"""
    reroutes = {
        await inf.reroute(engine, outref, argrefs) for inf in inferrers
    }
if len(reroutes) > 1:
# We do no rerouting if there is more than one possibility
reroutes = {None}
(newref,) = reroutes
if newref is not None:
return await engine.reroute(outref, newref)
if len(inferrers) == 1:
(inf,) = inferrers
return await _run_trace(inf, engine, outref, argrefs)
else:
pending = []
for inf in inferrers:
p = engine.loop.create_pending(resolve=None, priority=lambda: None)
pending.append(p)
engine.loop.schedule(
_inf_helper(engine, inf, outref, argrefs, p), ref=outref
)
        return bind(engine.loop, None, [], pending)

async def infer_distribute(
self, engine, a: AbstractArray, _shp: u64tup_typecheck
):
"""Infer the return type of primitive `distribute`."""
shp = tuple(x.xvalue() for x in _shp.elements)
a_shp = await force_pending(a.xshape())
delta = len(shp) - len(a_shp)
if delta < 0:
raise MyiaShapeError("Cannot distribute to smaller shape")
elif delta > 0:
a_shp = (1,) * delta + a_shp
for vs, s in zip(a_shp, shp):
if vs != s and vs not in (1, ANYTHING) and s not in (1, ANYTHING):
raise MyiaShapeError("Cannot change shape when distributing")
return type(a)(a.element, {SHAPE: shp, TYPE: a.xtype()}) | async def infer_distribute(
self, engine, a: AbstractArray, _shp: u64tup_typecheck
):
"""Infer the return type of primitive `distribute`."""
shp = tuple(x.xvalue() for x in _shp.elements)
a_shp = await force_pending(a.xshape())
delta = len(shp) - len(a_shp)
if delta < 0:
raise MyiaShapeError("Cannot distribute to smaller shape")
elif delta > 0:
a_shp = (1,) * delta + a_shp
for vs, s in zip(a_shp, shp):
if vs != s and vs not in (1, ANYTHING) and s not in (1, ANYTHING):
raise MyiaShapeError("Cannot change shape when distributing")
return type(a)(a.element, {SHAPE: shp, TYPE: a.xtype()}) |
def build_value(a, default=ABSENT):
    """Build a concrete value out of an abstract one.

    A concrete value cannot be built if, for some abstract data, the inferred
    value is ANYTHING. Some types such as AbstractArray cannot be built
    either.

    Arguments:
        a: The abstract value.
        default: A default value to return if the value cannot be built.
            If not provided, a ValueError will be raised in those cases.
    """

    def return_default(err):
        if default is ABSENT:
            raise err
        else:
            return default

    if isinstance(a, AbstractType):
        return a.element
    v = a.values.get(VALUE, ABSENT)
    if v is ANYTHING or isinstance(v, Possibilities):
        return return_default(ValueError(v))
    elif v is ABSENT:
        try:
            return _build_value(a)
        except ValueError as e:
            return return_default(e)
    elif isinstance(v, Pending):
        if v.done():
            return v.result()
        else:
            return return_default(ValueError(v))
    else:
        return v

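# Hedged examples of the contract above (AbstractScalar, VALUE, TYPE,
# ANYTHING and xtype are the names used throughout this module; exact
# import paths are assumptions):
#
#   a = AbstractScalar({VALUE: 3, TYPE: xtype.Int[64]})
#   build_value(a)             # -> 3: the value is fully known
#
#   b = AbstractScalar({VALUE: ANYTHING, TYPE: xtype.Int[64]})
#   build_value(b)             # raises ValueError(ANYTHING)
#   build_value(b, default=0)  # -> 0: the default suppresses the error
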
def abstract_check(self, x, **kwargs):
    """Check that a predicate applies to a given object."""
    __call__ = self.resolve(x)

    def proceed():
        if prop:
            if hasattr(x, prop):
                return getattr(x, prop) is x
            elif __call__(x, **kwargs):
                if isinstance(x, AbstractValue):
                    setattr(x, prop, x)
                return True
            else:
                return False
        else:
            return __call__(x, **kwargs)

    prop = self.prop
    cache = self.cache
    try:
        rval = cache.get(x, None)
    except TypeError:
        return proceed()
    if rval is None:
        cache[x] = True
        cache[x] = proceed()
        return cache[x]
    else:
        return rval

def concretize_abstract(self, x: Pending):
    """Clone an abstract value while resolving all Pending (synchronous)."""
    if x.done():
        return self(x.result())
    else:
        raise AssertionError("Unresolved Pending", x)

def concretize_cache(src, dest=None):
    """Complete a cache with concretized versions of its keys.

    If an entry in the cache has a key that contains a Pending, a new key
    is created where the Pending is resolved, and it is entered in the cache
    so that it can be found more easily.
    """
    if dest is None:
        dest = src
    for k, v in list(src.items()):
        kc = refmap(concretize_abstract, k)
        dest[kc] = v
        kc2 = refmap(no_tracking_id, kc)
        dest[kc2] = v

def broaden(self, d: TrackDict, **kwargs):  # noqa: D417
    """Broaden an abstract value.

    * Concrete values such as 1 or True will be broadened to ANYTHING.

    Arguments:
        d: The abstract data to clone.
    """
    return {k: k.broaden(v, self, **kwargs) for k, v in d.items()}

def sensitivity_transform(self, x: (AbstractFunction, AbstractFunctionUnique)):
    """Return an abstract value for the sensitivity of x.

    * The sensitivity of a function is an Env
    * The sensitivity of J(x) is x
    * We set the sensitivity of a random state as a nil scalar
    """
    return AbstractScalar({VALUE: ANYTHING, TYPE: xtype.EnvType})

async def force_through(self, x, through):
    """Clone an abstract value (asynchronous)."""
    __call__ = self[type(x), object]
    if not isinstance(x, through) and not isinstance(x, Pending):
        return x
    cache = self.state
    if isinstance(x, AbstractValue) and x in cache:
        return cache[x]
    call = __call__(x, through)
    if isinstance(call, AsyncGeneratorType):
        cls = await call.asend(None)
        inst = cls.empty()
        cache[x] = inst
        constructor = _make_constructor(inst)
        rval = await call.asend(constructor)
        assert rval is inst
        return rval
    else:
        return await call

def refmap(self, fn, x: Context):
    """Map a function on a Reference/Context/etc."""
    return Context(
        self(fn, x.parent), x.graph, tuple(fn(arg) for arg in x.argkey)
    )

def collapse_options(options):
    """Collapse a list of options, some of which may be AbstractUnions."""
    opts = []
    todo = list(options)
    while todo:
        option = todo.pop()
        if isinstance(option, AbstractUnion):
            todo.extend(option.options)
        else:
            opts.append(option)
    opts = Possibilities(opts)
    return opts

def union_simplify(options, constructor=AbstractUnion):
    """Simplify a list of options.

    Returns:
        * None, if there are no options.
        * A single type, if there is one option.
        * An AbstractUnion.
    """
    options = collapse_options(options)
    if len(options) == 0:
        return None
    elif len(options) == 1:
        return options.pop()
    else:
        return constructor(options)

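# Taken together, collapse_options flattens nested unions and
# union_simplify normalizes the result. A hedged sketch of the three
# cases (t1 and t2 stand for arbitrary abstract types):
#
#   union_simplify([])                         # -> None
#   union_simplify([t1])                       # -> t1, unwrapped
#   union_simplify([t1, AbstractUnion([t2])])  # -> AbstractUnion over
#                                              #    {t1, t2}, flattened first
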
def normalize_adt(x):
    """Normalize the ADT to make it properly recursive."""
    rval = _normalize_adt_helper(x, {}, {})
    rval = rval.intern()
    rval = broaden(rval)
    rval = _finalize_adt(rval)
    return rval

def manage(*graphs, weak=False):
    """Ensure that all given graphs have a manager and return it.

    * If one or more graphs has a manager, that manager will be used.
    * If two graphs have different managers, an error will be raised.
    * If no graph has a manager, one will be created.

    Arguments:
        graphs: The graphs to manage.
        weak: If True, when creating a new manager, graphs will not
            be forcefully associated with it. (Defaults to False.)
    """
    manager = None
    for graph in graphs:
        manager = graph._manager
        if manager:
            break
    if manager is None:
        manager = GraphManager(manage=not weak)
        root = True
    else:
        root = False
    for graph in graphs:
        manager.add_graph(graph, root=root)
    return manager

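# Typical usage, as a hedged sketch (g and h stand for Graph instances
# obtained elsewhere; only manage itself is defined in this file):
#
#   mng = manage(g)   # g becomes a root of a fresh GraphManager
#   manage(g, h)      # reuses g's manager; add_graph raises only if h
#                     # already belongs to a *different* manager
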
def dec(self, graph, key, qty=1):
    """Decrement the count for self[graph][key] by qty.

    The key is deleted if the count falls to zero.
    """
    d = self[graph]
    if d[key] == qty:
        del d[key]
        return True
    else:
        d[key] -= qty
        assert d[key] > 0
        return False

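# A runnable, standalone mirror of the decrement contract above on a
# plain Counter (the statistic's real container type is an assumption):
from collections import Counter

def _dec(d, key, qty=1):
    if d[key] == qty:
        del d[key]
        return True
    d[key] -= qty
    assert d[key] > 0
    return False

_d = Counter(fv=2)
assert _dec(_d, "fv") is False and _d["fv"] == 1  # decremented, key kept
assert _dec(_d, "fv") is True and "fv" not in _d  # hit zero, key dropped
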
def reset(self):
    """Reset this graph's information.

    This makes the statistic invalid, so it must be recomputed.
    """
    super().reset()
    self.valid = False

def gc(self):
    """Garbage-collect disconnected graphs.

    Normally this is done incrementally through reference counting, but
    because of circular references, some graphs might remain.
    """
    reach = OrderedSet(self.roots)
    for root in self.roots:
        reach.update(self.graphs_reachable[root])
    # TODO: Ideally the two lines below should be replaced by the commented
    # out line, but it causes an error in
    # tests/test_grad.py::test_recursive_closure
    # self._drop_all(self.graphs - reach, drop_nodes=True)
    if reach != self.graphs:
        self.reset()

def reset(self):
    """Reset the manager's state.

    Recompute everything from the roots.
    """
    old_events = self.events
    self.events = Events(
        add_node=None,
        drop_node=None,
        add_graph=None,
        drop_graph=None,
        add_edge=None,
        drop_edge=None,
        invalidate_nesting=None,
        invalidate_uses=None,
        reset=None,
        post_reset=None,
    )
    if old_events:
        old_events.reset()
    roots = OrderedSet(self.roots) if self.roots else OrderedSet()
    self.roots = OrderedSet()
    self.graphs = OrderedSet()
    self.all_nodes = OrderedSet()
    self.uses = defaultdict(OrderedSet)
    self.nodes = NodesStatistic(self)
    self.call_sites = CallSitesStatistic(self)
    self.higher_order_sites = HigherOrderSitesStatistic(self)
    self.constants = ConstantsStatistic(self)
    self.free_variables_direct = FVDirectStatistic(self)
    self.graph_constants = GraphConstantsStatistic(self)
    self.graphs_used = GraphsUsedStatistic(self)
    self.graph_users = GraphUsersStatistic(self)
    self.graph_dependencies_direct = GDepDirectStatistic(self)
    self.graph_dependencies_prox = GDepProxStatistic(self)
    self.graph_dependencies_prox_inv = GDepProxInvStatistic(self)
    self.graph_dependencies_total = GDepTotalStatistic(self)
    self.parents = ParentStatistic(self)
    self.children = ChildrenStatistic(self)
    self.scopes = ScopeStatistic(self)
    self._free_variables_total = FVTotalStatistic(self)
    self.free_variables_extended = FVExtendedStatistic(self)
    self.graphs_reachable = GraphsReachableStatistic(self)
    self.recursive = RecursiveStatistic(self)
    for root in roots:
        self.add_graph(root, root=True)
    if old_events:
        old_events.post_reset()

def add_graph(self, graph, root=False):
    """Add a graph to this manager, optionally as a root graph."""
    if root:
        self.roots.add(graph)
    if graph in self.graphs:
        return
    self._ensure_graph(graph)
    self.events.add_graph(graph)
    self._acquire_nodes(graph.parameters)
    self._acquire_nodes({graph.return_})

def keep_roots(self, *roots):
    """Keep only the graphs reachable from the given roots.

    All other graphs will be removed from this manager.

    If no roots are given, existing roots will be used.
    """
    if roots:
        self.roots = OrderedSet()
        for root in roots:
            self.add_graph(root, True)
    else:
        roots = self.roots
    keep = OrderedSet()
    for root in roots:
        keep.update(self.graphs_reachable[root])
    self._maybe_drop_graphs(
        self.graphs - keep, ignore_users=True, recursive=False
    )

def _ensure_graph(self, graph):
    """Ensure that the graph is managed by this manager."""
    if self.manage:
        if graph._manager and graph._manager is not self:
            raise ManagerError(
                "A graph can only have one manager. " f"Graph: {graph}"
            )
        graph._manager = self
    self.graphs.add(graph)

def _process_edge(self, node, key, inp, direction):
    """Add/remove an edge between two nodes.

    Arguments:
        node: node that will change inputs
        key (int): input position
        inp: input node to add/remove
        direction: {1, -1}: Added/removed::

            * 1 if the edge is added.
            * -1 if the edge is removed.

    """
    if self.check_opaque and self.check_opaque(inp):
        return
    if direction == -1:
        if (node, key) not in self.uses[inp]:
            # It's possible that we already got here when we
            # dropped a graph.
            return  # pragma: no cover
        self.uses[inp].remove((node, key))
        self.events.drop_edge(node, key, inp)
    else:
        if inp.graph is not None:
            self.add_graph(inp.graph)
        if inp.is_constant_graph():
            self.add_graph(inp.value)
        self.uses[inp].add((node, key))
        self.events.add_edge(node, key, inp)

def _process_inputs(self, node, direction):
    """Process the inputs of a newly [dis]connected node.

    Arguments:
        node: node to process
        direction: {1, -1}: Added/removed::

            * 1 if the node was connected.
            * -1 if the node was disconnected.

    """
    for key, inp in enumerate(node.inputs):
        self._process_edge(node, key, inp, direction)

def _maybe_drop_nodes(self, nodes):
    """Check if the nodes are connected to a graph, drop them if not."""
    nodes = OrderedSet(nodes)
    # Set of graphs to check if we want to drop them or not
    graphs_to_check = OrderedSet()
    while nodes:
        node = nodes.pop()
        if node not in self.all_nodes:
            continue
        uses = self.uses[node]
        if uses or (node.is_parameter() and node in node.graph.parameters):
            continue
        if node.is_constant_graph():
            graphs_to_check.add(node.value)
        self._process_inputs(node, -1)
        self.all_nodes.remove(node)
        self.events.drop_node(node)
        nodes.update(node.inputs)
    return graphs_to_check

def free_variables_total(self):
    """Map each graph to its free variables.

    This differs from `free_variables_direct` in that it also includes free
    variables needed by children graphs. Furthermore, graph Constants may
    figure as free variables.
    """
    return self._ensure_statistic(self._free_variables_total)

def _commit_changes(self, changes):
    """Commit changes.

    This modifies the graph and updates attributes and properties.
    """
    addedges = Counter()
    rmedges = Counter()
    adds = Counter()
    rms = Counter()
    undolog = []
    for operation, *args in changes:
        if operation == "set_edge":
            root_node, key, new_node = args
            old_node = root_node.inputs[key]
            rmedges[(root_node, key, old_node)] += 1
            addedges[(root_node, key, new_node)] += 1
            rms[old_node] += 1
            adds[new_node] += 1
            root_node.inputs[key] = new_node
            undolog.append(("set_edge", root_node, key, old_node))
        elif operation == "set_parameters":
            graph, new_parameters = args
            old_parameters = graph.parameters
            for p in new_parameters:
                adds[p] += 1
            for p in old_parameters:
                rms[p] += 1
            graph.parameters = new_parameters
            undolog.append(("set_parameters", graph, old_parameters))
    for root_node, key, new_node in addedges - rmedges:
        self._process_edge(root_node, key, new_node, 1)
    self._acquire_nodes(adds - rms)
    for root_node, key, old_node in rmedges - addedges:
        self._process_edge(root_node, key, old_node, -1)
    maybe_drop_graphs = self._maybe_drop_nodes(rms - adds)
    self._maybe_drop_graphs(maybe_drop_graphs)
    return undolog

def generate_data(
    n, batch_size, input_size, target_size, sequence_size, *, seed=91
):
    """Generate inputs and targets.

    Generates n batches of samples of size input_size, matched with
    a single target.
    """
    R = RandomState(seed=seed)
    return [
        (
            [param(R, batch_size, input_size) for i in range(sequence_size)],
            param(R, batch_size, target_size),
        )
        for i in range(n)
    ]

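# The shape of the returned data, as a hedged sketch (param is assumed,
# as elsewhere in this file, to return a (rows, cols) float array):
#
#   data = generate_data(n=2, batch_size=4, input_size=8, target_size=1,
#                        sequence_size=10)
#   len(data) == 2           # n batches
#   xs, y = data[0]
#   len(xs) == 10            # one input per sequence step
#   xs[0].shape == (4, 8)    # (batch_size, input_size)
#   y.shape == (4, 1)        # (batch_size, target_size)
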
def lstm_parameters(*layer_sizes, batch_size, seed=6666):
    """Generate parameters for an LSTM layer followed by linear layers."""
    R = RandomState(seed=seed)
    i, h, *rest = layer_sizes
    W_i = param(R, i, h)
    W_f = param(R, i, h)
    W_c = param(R, i, h)
    W_o = param(R, i, h)
    R_i = param(R, h, h)
    R_f = param(R, h, h)
    R_c = param(R, h, h)
    R_o = param(R, h, h)
    b_i = param(R, 1, h)
    b_f = param(R, 1, h)
    b_c = param(R, 1, h)
    b_o = param(R, 1, h)
    s0 = numpy.zeros((1, h), dtype=dtype)
    c0 = numpy.zeros((1, h), dtype=dtype)
    parameters = [
        (W_i, W_f, W_c, W_o, R_i, R_f, R_c, R_o, b_i, b_f, b_c, b_o, s0, c0)
    ]
    for i, o in zip((h, *rest[:-1]), rest):
        W = param(R, i, o)
        b = param(R, 1, o)
        parameters.append((W, b))
    return parameters

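# A hedged note on the returned structure (shapes follow directly from
# the code above):
#
#   params = lstm_parameters(8, 16, 1, batch_size=4)
#   params[0]  # 14-tuple for the LSTM layer: four input weights (8, 16),
#              # four recurrent weights (16, 16), four biases (1, 16),
#              # plus initial state s0 and cell c0, both (1, 16)
#   params[1]  # (W, b) for the linear layer: W is (16, 1), b is (1, 1)
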
def step(model, lr, x, y):
    """Run one SGD step; return the loss and the updated model."""
    # value_and_grad will return cost(model, x, y) and dcost(...)/dmodel.
    # The 'model' argument can be omitted: by default the derivative wrt
    # the first argument is returned.
    _cost, dmodel = value_and_grad(cost, "model")(model, x, y)
    return _cost, model - (lr * dmodel)

def run_helper(epochs, n, batch_size, layer_sizes):
    """Run a model with the specified layer sizes on n random batches.

    The first layer is an LSTM layer, the rest are linear+tanh.

    Arguments:
        epochs: How many epochs to run.
        n: Number of training batches to generate.
        batch_size: Number of samples per batch.
        layer_sizes: Sizes of the model's layers.
    """
    layers = []
    lstmp, *linp = lstm_parameters(*layer_sizes, batch_size=batch_size)
    layers.append(LSTMLayer(*lstmp))
    for W, b in linp:
        layers.append(Linear(W, b))
        layers.append(Tanh())
    model = Sequential(tuple(layers))
    data = generate_data(n, batch_size, layer_sizes[0], layer_sizes[-1], 10)
    lr = getattr(numpy, dtype)(0.01)
    for _ in range(epochs):
        costs = []
        t0 = time.time()
        for inp, target in data:
            cost, model = step(model, lr, inp, target)
            if isinstance(cost, numpy.ndarray):
                cost = float(cost)
            costs.append(cost)
        c = sum(costs) / n
        t = time.time() - t0
        print(f"Cost: {c:15.10f}\tTime: {t:15.10f}")

async def infer_tuple_getitem(
    self, engine, arg: lib.AbstractTuple, idx: xtype.Int[64]
):
    """Infer the return type of primitive `tuple_getitem`."""
    nelems = len(arg.elements)
    idx_v = self.require_constant(idx, argnum=2, range=range(-nelems, nelems))
    return arg.elements[idx_v]

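# The accepted range covers negative Python-style indices. A hedged
# sketch for a three-element tuple t = AbstractTuple([a0, a1, a2]):
#
#   tuple_getitem(t, 1)   # -> a1
#   tuple_getitem(t, -1)  # -> a2: range(-3, 3) admits negative indices
#   tuple_getitem(t, 3)   # rejected by require_constant's range check
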
def full(shape, fill_value, dtype=None):
    """Main code for operation full.

    Arguments:
        shape: a tuple of integers
        fill_value: a scalar value
        dtype: either a string (e.g. 'int32')
            or a numpy dtype (e.g. np.int32)

    Returns:
        an array
    """
    if dtype is None:
        dtype = typeof(fill_value)
    abstract_scalar_type = to_scalar_type(dtype)
    scalar_value = scalar_cast(fill_value, abstract_scalar_type)
    return distribute(
        myia_to_array(scalar_value, abstract_array(shape, scalar_value)), shape
    )

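# The intended behavior mirrors numpy.full; a runnable numpy analogy
# (the myia pipeline itself is not invoked here):
import numpy as np

# What full((2, 3), 7, "int32") is meant to compute, expressed with numpy:
expected = np.full((2, 3), 7, dtype=np.int32)
assert expected.shape == (2, 3) and expected.dtype == np.int32
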
def cynode(self, id, label, classes, parent=None, node=None):
    """Build data structure for a node in cytoscape."""
    if not isinstance(id, str):
        if node is None:
            node = id
        id = self.id(id)
    data = {"id": id, "label": str(label)}
    if self.tooltip_gen and node:
        ttip = self.tooltip_gen(self._strip_cosmetic(node))
        if ttip is not None:
            if not isinstance(ttip, str):
                ttip = str(hrepr(ttip))
            data["tooltip"] = ttip
    if parent:
        parent = parent if isinstance(parent, str) else self.id(parent)
        data["parent"] = parent
    self.nodes.append({"data": data, "classes": classes})

def cyedge(self, src_id, dest_id, label):
    """Build data structure for an edge in cytoscape."""
    cl = "input-edge"
    if isinstance(label, tuple):
        label, cl = label
    if not isinstance(label, str):
        label = str(label)
    if not isinstance(dest_id, str):
        dest_id = self.id(dest_id)
    if not isinstance(src_id, str):
        src_id = self.id(src_id)
    data = {
        "id": f"{dest_id}-{src_id}-{label}",
        "label": label,
        "source": dest_id,
        "target": src_id,
    }
    self.edges.append({"data": data, "classes": cl})

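# A hedged sketch of one emitted entry (ids are placeholders): calling
# self.cyedge("n1", "n2", ("0", "use-edge")) appends
#
#   {
#       "data": {"id": "n2-n1-0", "label": "0",
#                "source": "n2", "target": "n1"},
#       "classes": "use-edge",
#   }
#
# Note that the "source" field receives dest_id and "target" receives
# src_id, per the assignment above.
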
def const_fn(self, node):
    """Return name of function, if constant.

    Given an `Apply` node of a constant function, return the
    name of that function, otherwise return None.
    """
    return self.labeler.const_fn(node)

def add_graph(self, g):
    """Create a node for a graph."""
    if g in self.processed:
        return
    if self.beautify:
        g = cosmetic_transformer(g)
    name = self.name(g)
    argnames = [self.name(p) for p in g.parameters]
    lbl = f'{name}({", ".join(argnames)})'
    classes = ["function", "focus" if g in self.focus else ""]
    self.cynode(id=g, label=lbl, classes=" ".join(classes))
    self.processed.add(g)

def process_node_generic(self, node, g, cl):
    """Create node and edges for a node."""
    lbl = self.label(node)
    self.cynode(id=node, label=lbl, parent=g, classes=cl)
    fn = node.inputs[0] if node.inputs else None
    if fn and fn.is_constant_graph():
        self.graphs.add(fn.value)
    for inp in node.inputs:
        if inp.is_constant_graph():
            self.cyedge(src_id=g, dest_id=inp.value, label=("", "use-edge"))
    edges = []
    if fn and not (fn.is_constant() and self.function_in_node):
        edges.append((node, "F", fn))
    edges += [
        (node, i + 1, inp) for i, inp in enumerate(node.inputs[1:]) or []
    ]
    self.process_edges(edges)

def class_gen(self, node, cl=None):
    """Generate the class name for this node."""
    g = node.graph
    if cl is not None:
        pass
    elif node in self.returns:
        cl = "output"
    elif node.is_parameter():
        cl = "input"
        if node not in g.parameters:
            cl += " unlisted"
    elif node.is_constant():
        cl = "constant"
    elif node.is_special():
        cl = f"special-{type(node.special).__name__}"
    else:
        cl = "intermediate"
    if _has_error(node.debug):
        cl += " error"
    if self._class_gen:
        return self._class_gen(self._strip_cosmetic(node), cl)
    else:
        return cl

def process_node(self, node):
    """Create node and edges for a node."""
    if node in self.processed:
        return
    g = node.graph
    self.follow(node)
    cl = self.class_gen(node)
    if g and g not in self.processed:
        self.add_graph(g)
    if node.inputs and node.inputs[0].is_constant():
        fn = node.inputs[0].value
        if fn in cosmetics:
            cosmetics[fn](self, node, g, cl)
        elif hasattr(fn, "graph_display"):
            fn.graph_display(self, node, g, cl)
        else:
            self.process_node_generic(node, g, cl)
    else:
        self.process_node_generic(node, g, cl)
    self.processed.add(node)

def process(self):
    """Process all graphs in entry_points."""
    if self.nodes or self.edges:
        return
    while self.graphs:
        g = self.graphs.pop()
        self.process_graph(g)
    return self.nodes, self.edges

def add_graph(self, g):
    """Create a node for a graph."""
    name = self.name(g)
    argnames = [self.name(p) for p in g.parameters]
    lbl = f'{name}({", ".join(argnames)})'
    classes = ["function", "focus" if g in self.focus else ""]
    self.cynode(id=g, label=lbl, classes=" ".join(classes))
    # self.processed.add(g)

def process_node_generic(self, node, g, cl):
    """Create node and edges for a node."""
    if node.is_constant() and self.duplicate_constants:
        return
    lbl = self.label(node)
    self.cynode(id=node, label=lbl, parent=g, classes=cl)
    fn = node.inputs[0] if node.inputs else None
    if fn and fn.is_constant_graph():
        self.graphs.add(fn.value)
    for inp in node.inputs:
        if inp.is_constant_graph():
            self.cyedge(src_id=g, dest_id=inp.value, label=("", "use-edge"))
    edges = []
    if fn and not (fn.is_constant() and self.function_in_node):
        edges.append((node, "F", fn))
    edges += [
        (node, i + 1, inp) for i, inp in enumerate(node.inputs[1:]) or []
    ]
    self.process_edges(edges)

def process_node(self, node):
    """Create node and edges for a node."""
    # if node in self.processed:
    #     return
    g = node.graph
    # self.follow(node)
    cl = self.class_gen(node)
    if node.inputs and node.inputs[0].is_constant():
        fn = node.inputs[0].value
        if fn in cosmetics:
            cosmetics[fn](self, node, g, cl)
        elif hasattr(fn, "graph_display"):
            fn.graph_display(self, node, g, cl)
        else:
            self.process_node_generic(node, g, cl)
    else:
        self.process_node_generic(node, g, cl)

def process(self):
    """Process all graphs in entry_points."""
    if self.nodes or self.edges:
        return
    for g in self.graphs:
        self.add_graph(g)
    for node in self.todo:
        self.process_node(node)
    return self.nodes, self.edges