def pytorch_dtype_to_type(dtype):
"""Map a pytorch dtype to a myia type."""
import torch
_type_map = {
torch.int8: Int[8],
torch.int16: Int[16],
torch.int32: Int[32],
torch.int64: Int[64],
torch.uint8: UInt[8],
torch.float16: Float[16],
torch.float32: Float[32],
torch.float64: Float[64],
torch.bool: Bool,
}
if dtype not in _type_map:
raise TypeError(f"Unsupported dtype {dtype}")
    return _type_map[dtype]

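A standalone sketch of the same guarded-lookup pattern, using plain
strings as stand-ins for the torch dtypes and myia types so it runs
without either library (all names below are illustrative):

_TYPE_MAP = {"int8": "Int[8]", "float32": "Float[32]", "bool": "Bool"}

def dtype_name(dtype):
    # Same contract as above: raise TypeError on anything unmapped.
    try:
        return _TYPE_MAP[dtype]
    except KeyError:
        raise TypeError(f"Unsupported dtype {dtype}") from None

assert dtype_name("float32") == "Float[32]"
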
def mt(*tests, **kwargs):
"""Multitest.
All MyiaFunctionTest instances in the list of tests will be run on the same
function. If kwargs are provided, they will be given to all the tests.
"""
def deco(fn):
def runtest(test):
test.run(fn)
pytests = []
for test in tests:
test = test.configure(**kwargs)
pytests += test.generate_params()
runtest = pytest.mark.parametrize("test", pytests)(runtest)
return runtest
    return deco

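A minimal self-contained sketch of the same decorator pattern, assuming
only pytest: collect the parameter sets, then wrap a single runner with
pytest.mark.parametrize (multi and test_sum are illustrative names, not
part of myia):

import pytest

def multi(*cases):
    def deco(fn):
        def runtest(case):
            fn(*case)
        return pytest.mark.parametrize("case", cases)(runtest)
    return deco

@multi((1, 2, 3), (4, 5, 9))
def test_sum(x, y, expected):
    assert x + y == expected
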
def check(self, run, args, expected, **kwargs):
"""Check the result of run() against expected.
Expected can be either:
* A value, which will be compared using eqtest.
* A subclass of Exception, which run() is expected to raise.
* A callable, which can run custom checks.
"""
message = None
if isinstance(expected, Exception):
message = expected.args[0]
expected = type(expected)
if isinstance(expected, type) and issubclass(expected, Exception):
        try:
            res = run(args)
        except expected as err:
            if message is not None and message not in err.args[0]:
                raise
            return
        else:
            raise Exception(f"Expected {expected} to be raised, got {res}")
else:
res = run(args)
if isinstance(expected, FunctionType):
if not expected(args, res):
raise Exception(f"Failed the result check function")
elif not eqtest(res, expected, **kwargs):
        raise Exception(f"Mismatch: expected {expected}, got {res}")

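The three accepted forms of `expected` can be exercised with a
stripped-down standalone version (expect below is a toy, not the myia
API):

def expect(run, expected):
    if isinstance(expected, Exception):
        try:
            run()
        except type(expected) as err:
            assert expected.args[0] in err.args[0]
            return
        raise AssertionError("expected an exception, none was raised")
    result = run()
    if callable(expected):
        assert expected(result)
    else:
        assert result == expected

expect(lambda: 1 / 0, ZeroDivisionError("division"))  # type + message
expect(lambda: 21 * 2, 42)                            # plain value
expect(lambda: [1, 2], lambda r: len(r) == 2)         # custom check
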
def generate_params(self):
"""Generate pytest parameters.
If any of the kwargs is an instance of Multiple, we will generate tests
for each possible value it can take. If there are multiple Multiples,
we will test a cartesian product of them.
"""
marks = self.spec.get("marks", [])
id = self.spec.get("id", "test")
spec = dict(self.spec)
for key in ("marks", "id"):
if key in spec:
del spec[key]
multis = [
(k, v) for k, v in self.spec.items() if isinstance(v, Multiple)
]
options = list(product(*[v.options for _, v in multis]))
params = []
for option in options:
curr_spec = dict(spec)
curr_ids = []
curr_marks = list(marks)
for (spec_k, _), opt_info in zip(multis, option):
mul, opt_id, opt_marks, value = opt_info
curr_spec[spec_k] = value
curr_ids.append(opt_id)
curr_marks += opt_marks
curr_ids.append(id)
p = pytest.param(
MyiaFunctionTest(self.runtest, curr_spec),
marks=curr_marks,
id="-".join(curr_ids),
)
params.append(p)
    return params

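The cartesian-product expansion over Multiple options is the same move
as this standalone snippet, with plain lists standing in for Multiple
(spec and combos are illustrative names):

from itertools import product

spec = {"backend": ["cpu", "gpu"], "relay": [True, False]}
combos = [dict(zip(spec, values)) for values in product(*spec.values())]
assert len(combos) == 4  # 2 backends x 2 relay settings
assert combos[0] == {"backend": "cpu", "relay": True}
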
def backend_except(*excluded_backends):
"""Return backend_all without excluded backends."""
return Multiple(
*[
param
for backend, target, param in _get_backend_testing_parameters()
if target == "cpu" and backend not in excluded_backends
]
    )

def bt(*primitives):
"""Backend testing.
Generate a decorator to parametrize a test with backend names.
    Decorated test functions must expect an argument named "backend"
    that will receive the name of the backend to test with.
:param primitives: list of primitives or groups of primitives
(as PrimGroup objects) required for this test.
If a backend does not support any of given primitives or groups,
then related test is explicitly skipped.
:return: a decorator
"""
if primitives:
primitives = [PrimGroup.ensure(p) for p in primitives]
backends = [
pytest.param(backend, id=backend, marks=[getattr(pytest.mark, backend)])
for backend in sorted(_pytest_parameters)
]
def deco(fn):
@functools.wraps(fn)
def wrapper_fn(*args, **kwargs):
if primitives:
backend = kwargs["backend"]
bck = load_backend(backend)
for prim_group in primitives:
if not bck.supports_prim_group(prim_group):
pytest.skip(
f"Backend {backend} does not support {prim_group}"
)
return fn(*args, **kwargs)
return pytest.mark.parametrize("backend", backends)(wrapper_fn)
    return deco

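A self-contained sketch of the skip-if-unsupported idea, with a made-up
capability table in place of load_backend (every name below is
illustrative):

import functools

import pytest

_SUPPORTS = {"cpu": {"add", "mul"}, "gpu": {"add"}}

def needs(*prims):
    def deco(fn):
        @functools.wraps(fn)
        def wrapper(backend):
            missing = set(prims) - _SUPPORTS[backend]
            if missing:
                pytest.skip(f"{backend} does not support {sorted(missing)}")
            return fn(backend)
        return pytest.mark.parametrize("backend", sorted(_SUPPORTS))(wrapper)
    return deco

@needs("mul")
def test_mul(backend):
    assert backend == "cpu"  # the gpu case is skipped above
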
def populate(self):
"""Generate all the new nodes."""
for graph in self.graphs:
target_graph = self.get_graph(graph)
mng = self.manager
for p in graph.parameters:
self.gen_parameter(graph, target_graph, p)
for node in mng.nodes[graph]:
if node.is_apply():
self.gen_apply(graph, target_graph, node)
elif node.is_parameter() and node not in graph.parameters:
self.gen_rogue_parameter(graph, target_graph, node)
if self.gen_child is not NotImplemented:
for child in mng.children[graph]:
self.gen_child(graph, target_graph, child)
if self.gen_fv_direct is not NotImplemented:
for node in mng.free_variables_direct[graph]:
self.gen_fv_direct(graph, target_graph, node)
if self.gen_fv is not NotImplemented:
for node in mng.free_variables_total[graph]:
if isinstance(node, ANFNode):
self.gen_fv(graph, target_graph, node)
if self.gen_fv_graph is not NotImplemented:
for node in mng.free_variables_total[graph]:
if isinstance(node, Graph):
self.gen_fv_graph(graph, target_graph, node)
if self.gen_fv_extended is not NotImplemented:
for node in mng.free_variables_extended[graph]:
self.gen_fv_extended(graph, target_graph, node)
for ct in mng.constants[graph]:
if ct.is_constant_graph():
self.gen_constant_graph(graph, target_graph, ct)
else:
                self.gen_constant(graph, target_graph, ct)

def gen_apply(self, graph, new_graph, node):
"""Makes an empty Apply node (to link later)."""
with About(node.debug, self.relation):
new = Apply([], new_graph)
    self.remap_node(node, graph, node, new_graph, new)

def gen_constant(self, graph, new_graph, constant):
"""Makes a copy of the constant with the same value."""
with About(constant.debug, self.relation):
new = Constant(constant.value)
    self.remap_node(constant, graph, constant, new_graph, new)

def remap_node(self, key, graph, node, new_graph, new_node, link=None):
"""Remap the given node as normal and copy abstract and annotation."""
nn = super().remap_node(key, graph, node, new_graph, new_node, link)
if self.set_abstract and nn.abstract is None:
nn.abstract = node.abstract
if self.set_annotation and nn.annotation is None:
nn.annotation = node.annotation
    return nn

def gen_graph(self, graph):
"""Generate a new graph unless the graph is meant to be inlined.
Copies flags and transforms.
"""
if graph in self.inlines:
target_graph, new_params = self.inlines[graph]
for p, new_p in zip(graph.parameters, new_params):
self.repl[p] = new_p
elif graph not in self.graph_repl:
        self.graph_repl[graph] = graph.make_new(self.graph_relation)

def gen_constant_graph(self, graph, new_graph, constant):
"""Generate a constant for the cloned graph when applicable."""
g = constant.value
if g not in self.inlines and g in self.graph_repl:
target_graph = self.get_graph(g)
with About(constant.debug, self.relation):
new = Constant(target_graph)
        self.remap_node(constant, graph, constant, new_graph, new)

def run(self):
"""Run all remappers.
    All remappers generate their graphs first; then the nodes are
    populated, then linked, and finally the graphs are finalized.
"""
remappers = self.remappers.values()
for remapper in remappers:
remapper.generate()
for remapper in remappers:
remapper.populate()
for remapper in remappers:
remapper.link()
for remapper in remappers:
        remapper.finalize()

def collect_graphs(self, graphs, inlines):
"""Collect the full set of graphs to clone.
    This set will include the scopes of the graphs if clone_children
    is True, as well as any graphs they use if total is True.
"""
def expand_clones(graph):
if self.clone_children:
self.graphs.update(self.manager.scopes[graph] - {graph})
if self.total:
self.graphs.update(self.manager.graphs_reachable[graph])
self.graphs = set()
self.inlines = {}
for graph in graphs:
self.graphs.add(graph)
expand_clones(graph)
for graph, target_graph, new_params in inlines:
self.inlines[graph] = (target_graph, new_params)
expand_clones(graph)
if set(self.inlines) & self.graphs:
msg = "Trying to clone and inline a graph at the same time."
if self.total:
msg += " Try setting the `total` option to False."
raise Exception(msg)
    self.graphs.update(self.inlines)

def clone(
g, total=True, relation="copy", clone_constants=False, graph_relation=None,
):
"""Return a clone of g."""
return GraphCloner(
g,
total=total,
relation=relation,
clone_constants=clone_constants,
graph_relation=graph_relation,
    )[g]

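At its core, the cloning machinery above is a memoized recursive copy;
here is that core idea on nested tuples standing in for graphs
(clone_tree is a toy, not the GraphCloner API):

def clone_tree(node, memo=None):
    memo = {} if memo is None else memo
    key = id(node)
    if key not in memo:
        if isinstance(node, tuple):
            memo[key] = tuple(clone_tree(child, memo) for child in node)
        else:
            memo[key] = node
    return memo[key]

shared = (1, 2)
tree = (shared, shared)
copy = clone_tree(tree)
assert copy == tree and copy[0] is copy[1]  # sharing is preserved
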
def transformable_clone(graph, relation="transform"):
"""Return a clone of the graph that can be safely transformed.
If the graph is recursive, recursive calls will point to the original
graph and not to the clone. This allows us to modify the returned graph
safely, without messing up the recursive call sites.
"""
with About(graph.debug, relation):
newg = Graph()
for p in graph.parameters:
with About(p.debug, "copy"):
p2 = newg.add_parameter()
p2.abstract = p.abstract
cl = GraphCloner(inline=(graph, newg, newg.parameters))
newg.output = cl[graph.output]
    return newg

async def infer_scatter(
self,
engine,
input: lib.AbstractArray,
dim: xtype.UInt[64],
index: lib.AbstractArray,
src: lib.AbstractArray,
):
"""Infer the return type of primitive `scatter`."""
    return input

def bprop_scatter(x, dim, index, src, out, dout):
"""Backpropagator for primitive `scatter`."""
x_grad = scatter(dout, dim, index, zeros_like(src))
src_grad = gather(dout, dim, index)
    return (x_grad, zeros_like(dim), zeros_like(index), src_grad)

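Why these gradients hold is easiest to see in one dimension: scatter
overwrites x at `index` with src, so the overwritten slots contribute
nothing to dx, while dsrc is just dout gathered at those slots
(scatter1d and gather1d are toy stand-ins for the primitives):

def scatter1d(x, index, src):
    out = list(x)
    for i, j in enumerate(index):
        out[j] = src[i]
    return out

def gather1d(x, index):
    return [x[j] for j in index]

dout = [1.0, 2.0, 3.0, 4.0]
index = [0, 2]
dx = scatter1d(dout, index, [0.0] * len(index))  # zero the overwritten slots
dsrc = gather1d(dout, index)
assert dx == [0.0, 2.0, 0.0, 4.0] and dsrc == [1.0, 3.0]
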
async def infer_make_handle(
self, engine, typ: AbstractType, universe: xtype.UniverseType
):
"""Infer the return type of primitive `make_handle`."""
return AbstractTuple(
(
AbstractScalar({VALUE: ANYTHING, TYPE: xtype.UniverseType}),
AbstractHandle(typ.element),
)
    )

def parse(func, use_universe=False):
"""Parse a function into a Myia graph.
The result of the parsing is cached: multiple calls to parse on the same
function will return the same graph. It should therefore be cloned prior
to manipulation.
"""
flags = dict(getattr(func, "_myia_flags", {}))
if "use_universe" in flags:
use_universe = flags["use_universe"]
del flags["use_universe"]
key = (func, use_universe)
if key in _parse_cache:
return _parse_cache[key]
if "name" in flags:
name = flags["name"]
del flags["name"]
else:
name = None
inner_flags = {
flag: True for flag, value in flags.items() if value == "inner"
}
flags.update(inner_flags)
parser = Parser(func, recflags=flags, use_universe=use_universe)
graph = parser.parse()
for flag in inner_flags:
del graph.flags[flag]
if name is not None:
graph.debug.name = name
_parse_cache[key] = graph
    return graph

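The cache-then-clone contract boils down to this toy memoizer: the same
key always yields the same shared object, which is why callers must
clone before mutating (cached_parse and the string result are
illustrative stand-ins):

_cache = {}

def cached_parse(fn, use_universe=False):
    key = (fn, use_universe)
    if key not in _cache:
        _cache[key] = f"graph-of-{fn.__name__}"  # stand-in for a parsed graph
    return _cache[key]

def f():
    pass

assert cached_parse(f) is cached_parse(f)  # shared, hence clone before mutating
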
def _eval_ast_node(self, node):
"""Evaluate an AST node as a Python expression in function context."""
# eval() does not accept an AST node, so we first convert AST node
# to string code.
code = astunparse.unparse(node).strip()
# Then we evaluate it with eval().
    return eval(code, self.function.__globals__, self.closure_namespace)

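On Python 3.9+, the standard library can do the same round-trip without
astunparse; a minimal demonstration of the unparse-then-eval trick:

import ast

node = ast.parse("1 + 2 * 3", mode="eval").body
code = ast.unparse(node)  # stdlib counterpart of astunparse.unparse(node)
assert eval(code, {}, {}) == 7
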
def make_location(self, node) -> Location:
"""Create a Location from an AST node."""
if isinstance(node, (list, tuple)):
if len(node) == 0:
return None
node0 = node[0]
node1 = node[-1]
else:
node0 = node
node1 = node
if hasattr(node0, "lineno") and hasattr(node0, "col_offset"):
li1, col1 = node0.first_token.start
li2, col2 = node1.last_token.end
li1 += self.line_offset - 1
li2 += self.line_offset - 1
col1 += self.col_offset
col2 += self.col_offset
return Location(self.filename, li1, col1, li2, col2, node)
else:
# Some nodes like Index carry no location information, but
# we basically just pass through them.
        return None

def make_condition_blocks(self, block):
"""Make two blocks for an if statement or expression."""
with About(block.graph.debug, "if_true"):
true_block = self.new_block(auxiliary=True)
true_block.preds.append(block)
true_block.mature()
with About(block.graph.debug, "if_false"):
false_block = self.new_block(auxiliary=True)
false_block.preds.append(block)
false_block.mature()
    return true_block, false_block

def parse(self) -> Graph:
"""Parse the function into a Myia graph."""
src0 = inspect.getsource(self.function)
src = textwrap.dedent(src0)
# We need col_offset to compensate for the dedent
self.col_offset = len(src0.split("\n")[0]) - len(src.split("\n")[0])
tree = asttokens.ASTTokens(src, parse=True).tree
function_def = tree.body[0]
assert isinstance(function_def, ast.FunctionDef)
main_block, _finalize = self._create_function(None, function_def)
_finalize()
for node in dfs(main_block.graph.return_, succ_deeper):
if node.is_constant_graph():
if node.value.return_ is None:
raise MyiaSyntaxError(
"Function doesn't return a value in all cases",
node.value.debug.find(
"location",
skip={"while_after", "for_after", "if_after"},
),
)
diff_cache = self.write_cache - self.read_cache
if diff_cache:
for _, varname, node in diff_cache:
if varname != "_":
warnings.warn(
MyiaDisconnectedCodeWarning(
f"{varname} is not used "
+ f"and will therefore not be computed",
node.debug.location,
)
)
self.write_cache = OrderedSet()
self.read_cache = OrderedSet()
    return main_block.graph

def _create_function(
self, block: Optional["Block"], node: ast.FunctionDef
) -> Tuple["Block", Callable[[], "Block"]]:
"""Process a function definition and return block and finalizer.
To process the statements in the function the finalizer must be
called with no arguments. It should only be called after processing
the statements in the parent function so that mutual recursion and
forward references can be resolved.
"""
with DebugInherit(ast=node, location=self.make_location(node)):
function_block = self.new_block(type="function")
if block:
function_block.preds.append(block)
else:
# This is the top-level function, so we set self.graph
self.graph = function_block.graph
function_block.mature()
function_block.graph.debug.name = node.name
# Use the same priority as python where an argument with the
# same name will mask the function.
graph = function_block.graph
function_block.write(node.name, Constant(graph), track=False)
def _finalize():
return self._finalize_function(node, function_block)
        return function_block, _finalize

def process_statements(
self, starting_block: "Block", nodes: List[ast.stmt]
) -> "Block":
"""Process a sequence of statements.
    If the list of statements is empty, the starting block is returned
    unchanged, so that empty code blocks (e.g. an empty else branch)
    still have a block that can be used to call the continuation from.
"""
block = starting_block
for node in nodes:
block = self.process_node(block, node, used=False)
    return block

def process_Expr(self, block: "Block", node: ast.Expr) -> "Block":
"""Process an expression statement.
This ignores the statement.
"""
if self.use_universe:
self.process_node(block, node.value)
else:
if self.flag_used == 0 and not isinstance(node.value, ast.Str):
warnings.warn(
MyiaDisconnectedCodeWarning(
f"Expression was not assigned to a variable."
+ f"\n\tAs a result, it is not connected to the output "
+ f"and will not be executed.",
self.make_location(node),
)
)
    return block

def process_If(self, block: "Block", node: ast.If) -> "Block":
"""Process a conditional statement.
A conditional statement generates 3 functions: The true branch, the
false branch, and the continuation.
"""
# Process the condition
cond = self.process_node(block, node.test)
# Create two branches
true_block, false_block = self.make_condition_blocks(block)
true_block.graph.debug.location = self.make_location(node.body)
false_block.graph.debug.location = self.make_location(node.orelse)
# Create the continuation
with About(block.graph.debug, "if_after"):
after_block = self.new_block(auxiliary=True)
# Process the first branch
true_end = self.process_statements(true_block, node.body)
# A return statement in the branch might mean that a continuation has
# already been set
if not true_end.graph.return_:
true_end.jump(after_block)
# And the second
false_end = self.process_statements(false_block, node.orelse)
if not false_end.graph.return_:
false_end.jump(after_block)
    # And stitch it together
block.cond(cond, true_block, false_block)
after_block.mature()
    return after_block

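The call emitted by block.cond amounts to selecting a zero-argument
branch function with a switch and then calling the selection; a
plain-Python rendering of that shape (user_switch here is an ordinary
function, not the myia operation):

def user_switch(cond, tb, fb):
    return tb if cond else fb

def true_block():
    return "then"

def false_block():
    return "else"

assert user_switch(3 > 2, true_block, false_block)() == "then"
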
def process_While(self, block: "Block", node: ast.While) -> "Block":
"""Process a while loop.
A while loop will generate 3 functions: The test, the body, and the
continuation.
"""
with About(block.graph.debug, "while_header"):
header_block = self.new_block(auxiliary=True)
with About(block.graph.debug, "while_body"):
body_block = self.new_block(auxiliary=True)
with About(block.graph.debug, "while_after"):
after_block = self.new_block(auxiliary=True)
body_block.preds.append(header_block)
after_block.preds.append(header_block)
block.jump(header_block)
cond = self.process_node(header_block, node.test)
body_block.mature()
header_block.cond(cond, body_block, after_block)
after_body = self.process_statements(body_block, node.body)
if not after_body.graph.return_:
after_body.jump(header_block)
header_block.mature()
after_block.mature()
    return after_block

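Functionally, the three generated blocks behave like mutually recursive
functions; a sketch of that encoding for `while i < 5: acc += i; i += 1`
(all names illustrative):

def header(i, acc):  # while_header: test, then pick a branch
    return body(i, acc) if i < 5 else after(acc)

def body(i, acc):  # while_body: one iteration, then jump back to header
    return header(i + 1, acc + i)

def after(acc):  # while_after: the continuation
    return acc

assert header(0, 0) == 10  # 0 + 1 + 2 + 3 + 4
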
def apply(self, fn, *args):
"""Create an application of fn on args."""
if self.use_universe:
tget = self.operation("tuple_getitem")
usl = self.operation("universal")
ufn = self.graph.apply(usl, fn)
pair = self.graph.apply(ufn, *args, self.universe)
self.universe = self.graph.apply(tget, pair, 0)
return self.graph.apply(tget, pair, 1)
else:
        return self.graph.apply(fn, *args)

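Universal mode threads a universe value through every effectful call as
an extra input and a first output; the shape of that convention in plain
Python (effectful_inc and the dict universe are illustrative):

def effectful_inc(key, universe):
    new_universe = dict(universe)
    new_universe[key] = universe.get(key, 0) + 1
    return new_universe, new_universe[key]  # the (universe, value) pair

universe = {}
universe, value = effectful_inc("hits", universe)
universe, value = effectful_inc("hits", universe)
assert value == 2 and universe == {"hits": 2}
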
def add_parameter(self, parameter=None):
"""Add a parameter to the graph."""
p = parameter or Parameter(self.graph)
if self.use_universe:
self.graph.parameters.insert(-1, p)
else:
self.graph.parameters.append(p)
    return p

def make_resolve(self, module_name, symbol_name):
"""Return a subtree that resolves a name in a module."""
return self.graph.apply(
operations.resolve, Constant(module_name), Constant(symbol_name)
    )

def make_switch(self, cond, true_block, false_block, op="user_switch"):
"""Return a subtree that implements a switch operation."""
return self.apply(
self.operation(op), cond, true_block.graph, false_block.graph
    )

def read(self, varnum: str, resolve_globals=True, lock=False) -> ANFNode:
"""Read a variable.
    If this name was defined in one of the previous statements, it
will be trivially resolved. It is possible that the variable was
defined in a previous block (e.g. outside of the loop body or the
branch). In this case, it will be resolved only if all predecessor
blocks are available. If they are not, we will assume that this
variable is given as a function argument (which plays the role of a phi
node).
Args:
varnum: The name of the variable to read.
resolve_globals: If the name is not resolvable,
assume it is a global
lock: Either False or the Graph that is reading this variable and
for which it should be locked.
"""
if varnum in self.variables:
self.parser.read_cache.add((self, varnum, self.variables[varnum]))
node = self.variables[varnum]
if (
node.is_constant_graph()
and node.value in self.parser.finalizers
):
fin = self.parser.finalizers.pop(node.value)
fin()
if lock:
self.lock[varnum] = lock
return _fresh(node)
nophi = varnum not in self.parser.possible_phis
if self.matured or nophi:
def _resolve():
if not resolve_globals:
return None
cn = self.parser.closure_namespace
if varnum in cn:
return self.make_resolve(cn, varnum)
else:
ns = self.parser.global_namespace
return self.make_resolve(ns, varnum)
if not self.preds:
return _resolve()
if len(self.preds) == 1 or nophi:
result = self.preds[0].read(
varnum,
resolve_globals=False,
lock=lock
or (self.type == "function" and self.graph.debug.name),
)
return result or _resolve()
# TODO: point to the original definition
with About(NamedDebugInfo(name=varnum), "phi"):
phi = Parameter(self.graph)
self.add_parameter(phi)
self.phi_nodes[phi] = varnum
self.write(varnum, phi)
if self.matured:
self.set_phi_arguments(phi)
    return phi

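Ignoring phi nodes and locking, the lookup order in read() is local
bindings, then predecessor blocks, then globals; a compact standalone
model of just that chain (this Block class is a toy, not the parser's):

class Block:
    def __init__(self, preds=(), **bindings):
        self.preds = list(preds)
        self.bindings = bindings

    def read(self, name, globals_ns=None):
        if name in self.bindings:
            return self.bindings[name]
        for pred in self.preds:
            try:
                return pred.read(name)
            except KeyError:
                pass
        if globals_ns is not None and name in globals_ns:
            return globals_ns[name]
        raise KeyError(name)

outer = Block(x=1)
inner = Block(preds=[outer])
assert inner.read("x") == 1
assert inner.read("y", globals_ns={"y": 2}) == 2
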
def write(self, varnum: str, node: ANFNode, track=True) -> None:
"""Write a variable.
    When assignment is used to bind a value to a name, we store this
mapping in the block to be used by subsequent statements.
Args:
varnum: The name of the variable to store.
node: The node representing this value.
track: True if we want to warn about this variable not being used.
"""
if varnum in self.lock:
raise MyiaSyntaxError(
f"Trying to modify variable '{varnum}'"
f" after it was captured by function '{self.lock[varnum]}'.",
loc=current_info().location,
)
self.variables[varnum] = node
if track and not node.is_parameter():
        self.parser.write_cache.add((self, varnum, node))

def jump(self, target: "Block", *args) -> Apply:
"""Jumping from one block to the next becomes a tail call.
This method will generate the tail call by calling the graph
corresponding to the target block using an `Apply` node, and returning
its value with a `Return` node. It will update the predecessor blocks
of the target appropriately.
Args:
target: The block to jump to from this statement.
"""
assert self.graph.return_ is None
jump = self.apply(target.graph, *args)
jump_call = jump
if self.use_universe:
jump_call = jump.inputs[1]
self.jumps[target] = jump_call
target.preds.append(self)
    self.returns(jump)

def cond(self, cond: ANFNode, true: "Block", false: "Block") -> Apply:
"""Perform a conditional jump.
This method will generate the call to the if expression and return its
value. The predecessor blocks of the branches will be updated
appropriately.
Args:
cond: The node representing the condition (true or false).
true: The block to jump to if the condition is true.
false: The block to jump to if the condition is false.
"""
assert self.graph.return_ is None
switch = self.make_switch(cond, true, false)
    self.returns(self.apply(switch))

def raises(self, exc):
"""Raise an exception in this block."""
inputs = [Constant(primops.raise_), exc]
raise_ = Apply(inputs, self.graph)
    self.returns(raise_)

def returns(self, value):
"""Return a value in this block."""
assert self.graph.return_ is None
if self.use_universe:
tmake = self.operation("make_tuple")
self.graph.output = self.graph.apply(tmake, self.universe, value)
else:
        self.graph.output = value

async def infer_invert_permutation(self, engine, perm: u64tup_typecheck):
"""Infer the return type of primitive `invert_permutation`."""
v = [x.xvalue() for x in perm.elements]
return AbstractTuple(
[
AbstractScalar({VALUE: v.index(i), TYPE: xtype.UInt[64]})
if i in v
else AbstractScalar({VALUE: ANYTHING, TYPE: xtype.UInt[64]})
for i in range(len(v))
]
    )

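The value computation here is ordinary permutation inversion; the same
thing on a plain tuple:

def invert_permutation(perm):
    return tuple(perm.index(i) for i in range(len(perm)))

perm = (2, 0, 1)
inv = invert_permutation(perm)
assert inv == (1, 2, 0)
assert all(inv[perm[j]] == j for j in range(len(perm)))
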
async def infer_tagged(self, engine, x, *rest):
"""Infer the return type of primitive `tagged`."""
if len(rest) == 0:
return lib.AbstractUnion([lib.broaden(x, loop=engine.loop)])
elif len(rest) == 1:
(tag,) = rest
tag_v = self.require_constant(tag, argnum=2)
return lib.AbstractTaggedUnion(
[[tag_v, lib.broaden(x, loop=engine.loop)]]
)
else:
        raise lib.type_error_nargs(P.tagged, "1 or 2", len(rest) + 1)

def default_convert(env, fn: FunctionType, manage):
"""Default converter for Python types."""
g = parser.parse(fn)
rval = env(g, manage)
env.set_cached(fn, rval)
    return rval

def activate(self, force=False):
"""Activate the tracker.
If the tracker is already activated, this does nothing.
"""
if force or not self.activated:
self.manager.events.add_node.register(self._on_add_node)
self.manager.events.drop_node.register(self._on_drop_node)
self.manager.events.post_reset.register(
lambda evt: self.activate(force=True)
)
        self.activated = True

def isomorphic(g1, g2, equiv=None):
r"""Return whether g1 and g2 are structurally equivalent.
Constants are isomorphic iff they contain the same value or are isomorphic
graphs.
g1.return\_ and g2.return\_ must represent the same node under the
isomorphism. Parameters must match in the same order.
"""
if equiv is None:
equiv = {}
if (g1, g2) in equiv:
return equiv[(g1, g2)] is not False
if len(g1.parameters) != len(g2.parameters):
return False
equiv.update(dict(zip(g1.parameters, g2.parameters)))
equiv[(g1, g2)] = "PENDING"
rval = _same_subgraph(g1.return_, g2.return_, equiv)
equiv[(g1, g2)] = rval
    return rval

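The PENDING entry is what lets the recursion terminate on cyclic graphs:
a pair under comparison is provisionally assumed equivalent. The same
trick on self-referential linked lists (same() is a toy analogue):

def same(a, b, equiv=None):
    equiv = {} if equiv is None else equiv
    key = (id(a), id(b))
    if key in equiv:
        return equiv[key] is not False
    equiv[key] = "PENDING"  # provisional: assume equivalent while recursing
    if a is None or b is None:
        rval = a is b
    else:
        rval = a["value"] == b["value"] and same(a["next"], b["next"], equiv)
    equiv[key] = rval
    return rval

x = {"value": 1, "next": None}
x["next"] = x  # cyclic
y = {"value": 1, "next": None}
y["next"] = y
assert same(x, y)
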
def print_node(node):
"""Return a textual representation of a node and its ancestors."""
import io
buf = io.StringIO()
g = node.graph
for n in toposort(node):
if n.graph is not None and n.graph is not g:
continue
_print_node(n, buf)
    return buf.getvalue()

def print_graph(g, allow_cycles=True):
"""Returns a textual representation of a graph."""
import io
buf = io.StringIO()
print(f"graph {str(g)}(", file=buf, end="")
print(
", ".join(
f"%{str(p)}{' : ' + str(p.abstract) if p.abstract is not None else ''}"
for p in g.parameters
),
file=buf,
end="",
)
print(") ", file=buf, end="")
if g.abstract is not None:
print(f"-> {g.abstract.output} ", file=buf, end="")
print("{", file=buf)
seen_graphs = set([g])
def _succ_deep_once(node):
if node.is_constant_graph():
res = [node.value.return_] if node.value not in seen_graphs else []
seen_graphs.add(node.value)
return res
else:
return node.incoming
for node in _toposort(g.output, _succ_deep_once, allow_cycles=allow_cycles):
if (
node.graph is not None and node.graph is not g
) or node is g.return_:
# There is a bug in coverage which makes it ignore the continue,
# even though it is covered.
continue # pragma: no cover
_print_node(node, buf, offset=2)
print(f" return %{str(g.output)}", file=buf)
print("}", file=buf)
    return buf.getvalue()

def lop(op, type, name):
"""Generate a left hand side method.
Returns NotImplemented if the second input has the wrong type.
"""
@core(name=name, static_inline=True)
def protocol(x, y):
if hastype(y, type):
return op(x, y)
else:
return NotImplemented
    return protocol

def rop(op, type, name):
"""Generate a right hand side method.
Returns NotImplemented if the second input has the wrong type.
"""
@core(name=name, static_inline=True)
def protocol(x, y):
if hastype(y, type):
return op(y, x)
else:
return NotImplemented
    return protocol

def reverse_binop(op, name):
"""Reverse the argument order of a binary function."""
@core(name=name, static_inline=True)
def protocol(x, y):
return op(y, x)
    return protocol

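These generators lean on Python's binary-operator protocol: returning
NotImplemented from the left-hand method makes the interpreter try the
right-hand (reflected) one. A standalone illustration (the Meters class
is made up for this example):

class Meters:
    def __init__(self, n):
        self.n = n

    def __add__(self, other):
        if isinstance(other, Meters):
            return Meters(self.n + other.n)
        return NotImplemented  # let the other operand's __radd__ try

    def __radd__(self, other):
        if isinstance(other, (int, float)):
            return Meters(other + self.n)
        return NotImplemented

assert (Meters(2) + Meters(3)).n == 5
assert (2 + Meters(3)).n == 5  # int.__add__ fails, Meters.__radd__ runs
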
def tentative(self, p: Possibilities, *, loop): # noqa: D417
"""Broaden an abstract value and make it tentative.
* Concrete values such as 1 or True will be broadened to ANYTHING.
* Possibilities will be broadened to PendingTentative. This allows
us to avoid resolving them earlier than we would like.
Arguments:
p: The abstract data to clone.
loop: The InferenceLoop, used to broaden Possibilities.
"""
    return loop.create_pending_tentative(tentative=p)

def amerge(self, x1, x2, forced=False, bind_pending=True, accept_pending=True):
"""Merge two values.
If forced is False, amerge will return a superset of x1 and x2, if it
exists.
If the forced argument is True, amerge will either return x1 or fail.
This makes a difference in some situations:
* amerge(1, 2, forced=False) => ANYTHING
* amerge(1, 2, forced=True) => Error
* amerge(ANYTHING, 1234, forced=True) => ANYTHING
* amerge(1234, ANYTHING, forced=True) => Error
Arguments:
x1: The first value to merge
x2: The second value to merge
forced: Whether we are already committed to returning x1 or not.
bind_pending: Whether we bind two Pending, unresolved values.
accept_pending: Works the same as bind_pending, but only for the
top level call.
"""
if x1 is x2:
return x1
keypair = (id(x1), id(x2))
if keypair in self.state:
result = self.state[keypair]
if result is ABSENT:
# Setting forced=True will set the keypair to x1 (and then check
# that x1 and x2 are compatible under forced=True), which lets us
# return a result for self-referential data.
return amerge(
x1,
x2,
forced=True,
bind_pending=bind_pending,
accept_pending=accept_pending,
)
else:
return result
def helper():
nonlocal x1, x2
while isinstance(x1, Pending) and x1.done() and not forced:
x1 = x1.result()
while isinstance(x2, Pending) and x2.done():
x2 = x2.result()
isp1 = isinstance(x1, Pending)
isp2 = isinstance(x2, Pending)
loop = x1.get_loop() if isp1 else x2.get_loop() if isp2 else None
if isinstance(x1, PendingTentative):
new_tentative = self(x1.tentative, x2, False, True, bind_pending)
assert not isinstance(new_tentative, Pending)
x1.tentative = new_tentative
return x1
if isinstance(x2, PendingTentative):
new_tentative = self(
x1, x2.tentative, forced, bind_pending, accept_pending
)
assert not isinstance(new_tentative, Pending)
x2.tentative = new_tentative
return new_tentative if forced else x2
if (isp1 or isp2) and (not accept_pending or not bind_pending):
if forced and isp1:
raise MyiaTypeError("Cannot have Pending here.")
if isp1:
def chk(a):
return self(a, x2, forced, bind_pending)
return find_coherent_result_sync(x1, chk)
if isp2:
def chk(a):
return self(x1, a, forced, bind_pending)
return find_coherent_result_sync(x2, chk)
if isp1 and isp2:
return bind(loop, x1 if forced else None, [], [x1, x2])
elif isp1:
return bind(loop, x1 if forced else None, [x2], [x1])
elif isp2:
return bind(loop, x1 if forced else None, [x1], [x2])
elif isinstance(x2, AbstractBottom): # pragma: no cover
return x1
elif isinstance(x1, AbstractBottom):
if forced: # pragma: no cover
# I am not sure how to trigger this
raise TypeMismatchError(x1, x2)
return x2
elif x1 is ANYTHING:
return x1
elif x2 is ANYTHING:
if forced:
raise TypeMismatchError(x1, x2)
return x2
else:
return self[type(x1), type(x2), object, object](
x1, x2, forced, bind_pending
)
self.state[keypair] = x1 if forced else ABSENT
rval = helper()
self.state[keypair] = rval
if forced:
assert rval is x1
    return rval


Python
def annotation_merge(self, x1: AbstractUnion, x2: AbstractValue, forced, bp):
    """Special variant to merge an abstract union with an abstract value.

    Example case: when checking an annotation `list` (without arguments)
    against a list (AbstractADT), `type_to_abstract(annotation)` will return
    a union (of the empty list and a list of anything), and then
    amerge(annotation: AbstractUnion, abstract: AbstractADT) will fail.
    This variant is intended to check such cases.
    """
    # We check if at least one option in x1 matches x2.
    merged = None
    for option in set(x1.options):
        try:
            merged = self(option, x2, forced, bp)
            break
        except TypeMismatchError:
            pass
    if merged is None:
        raise TypeMismatchError(x1, x2)
    # If forced, we return the union.
    if forced:
        return x1
    # Otherwise, we return the merged type.
    return merged

Python
def annotation_merge(self, x1: AbstractDict, x2: AbstractDict, forced, bp):
    """Specific case when annotation (x1) is a dict type hint."""
    entries_1 = x1.entries
    entries_2 = x2.entries
    if isinstance(entries_1, DictDesc):
        # Create a dict in entries_1 using keys from entries_2.
        entries_1 = entries_1.to_dict(entries_2.keys())
    # Otherwise, entries_1 and entries_2 should be real dicts.
    args1 = (entries_1, x1.values)
    args2 = (entries_2, x2.values)
    merged = self(args1, args2, forced, bp)
    return x1 if (forced or merged is args1) else type(x1)(*merged)

Python
def split_type(t, model):
    """Check t against the model and return matching/non-matching subtypes.

    * If t is a Union, return a Union that fully matches model, and a Union
      that does not match model. No matches in either case returns None for
      that case.
    * Otherwise, return (t, None) or (None, t) depending on whether t matches
      the model.
    """
    if isinstance(t, AbstractUnion):
        matching = [(opt, typecheck(model, opt)) for opt in set(t.options)]
        t1 = union_simplify(opt for opt, m in matching if m)
        t2 = union_simplify(opt for opt, m in matching if not m)
        return t1, t2
    elif typecheck(model, t):
        return t, None
    else:
        return None, t
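
# Illustrative sketch (not from the source), using plain type names instead
# of myia's abstract values: splitting a union against a model partitions
# its options, e.g.
#     split_type(Union[int, str], model=int)  ->  (int, str)
#     split_type(int, model=int)              ->  (int, None)
#     split_type(str, model=int)              ->  (None, str)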

Python
def typecheck(model, abstract):
    """Check that abstract matches the model."""
    try:
        amerge(model, abstract, forced=True, bind_pending=False)
    except MyiaTypeError:
        return False
    else:
        return True

Python
def glob_to_regex(glob):
    """Transforms a glob-like expression into a regular expression.

    * `**` matches any character sequence including the / delimiter
    * `/**/` can also match `/`
    * `*` matches any character sequence except /
    * If glob does not start with /, `/**/` is prepended
    """
    def replacer(m):
        if m.group() == "/**/":
            return r"(/.*/|/)"
        else:
            return r"[^/]*"

    if glob.startswith("**"):
        glob = f"/{glob}"
    elif not glob.startswith("/"):
        glob = f"/**/{glob}"
    if glob.endswith("**"):
        glob += "/*"
    patt = r"/\*\*/|\*"
    glob = re.sub(patt, replacer, glob)
    return re.compile(glob)
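
# Illustrative check (not from the source) of the patterns produced above:
# "parse/*" gains the implicit "/**/" prefix, so it matches at any depth
# but allows only one path segment after "parse".
rx = glob_to_regex("parse/*")
assert rx.match("/myia/parse/events")
assert not rx.match("/parsing/events")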

Python
def install(self, tracer):
    """Install the listeners on the tracer."""
    for method_name in dir(self):
        if method_name.startswith("on_"):
            ev = method_name[3:]
            patt = f"{self.focus}/{ev}" if self.focus else ev
            tracer.on(patt, getattr(self, method_name))

Python
def post(self):
    """Print the events and their arguments."""
    for path, keys in self.paths.items():
        print(path)
        for key in sorted(keys):
            typenames = {v.__qualname__ for v in keys[key]}
print(" ", key, "::", " | ".join(typenames)) | def post(self):
"""Print the events and their arguments."""
for path, keys in self.paths.items():
print(path)
for key in sorted(keys):
typenames = {v.__qualname__ for v in keys[key]}
print(" ", key, "::", " | ".join(typenames)) |

Python
def print(self, *, indent=0):
    """Print a visualisation of a profile."""
    ind = " " * indent
    if self.name is not None and self.total is not None:
        print(f"{ind}{self.name:30}{_unit(self.total)}")
        indent += 3
        ind = " " * indent
    if self.overhead:
        print(f'{ind}{"[overhead]":30}{_unit(self.overhead)}')
    for prof2 in self.values():
        prof2.print(indent=indent)

Python
def on_enter(self, _stack=None, profile=True, **kwargs):
    """Executed when a block is entered."""
    if not profile:
        return
    d = self.hierarchical
    for part in _stack[:-1]:
        if not part.kwargs.get("profile", True):
            return
        d = d[part.name]
    lpart = _stack[-1]
    m = ProfileResults(lpart.name)
    if lpart.name in d:
        lpart.name = f"{lpart.name}.{self._ctr}"
        self._ctr += 1
    d[lpart.name] = m
    m.start = perf_counter()

Python
def on_exit(self, _stack=None, profile=True, **kwargs):
    """Executed when a block is exited."""
    if not profile:
        return
    end = perf_counter()
    d = self.hierarchical
    for part in _stack:
        if not part.kwargs.get("profile", True):
            return
        d = d[part.name]
    d.end = end
    d.total = d.end - d.start
    d.parts_total = sum(v.total for v in d.values())
    if d.parts_total:
        d.overhead = d.total - d.parts_total
    else:
        d.overhead = 0
    self.overhead += d.overhead
    self.aggregate[d.name].append(d)

Python
def resolve_tracers(spec):
    """Return a list of (fn, args) pairs from a string specification.

    The specification has the following forms:

    * module.function
    * module.function:arg
    * module.function:arg1:arg2
    * module.function(arg1,arg2)
    * module.function1;module.function2

    The functions are not called immediately; the consumer must call each
    function on its args when needed.
    """
    def _resolve_single(call):
        if "(" in call:
            path, args = call.split("(", 1)
            assert args.endswith(")")
            args = eval(f"({args[:-1]},)")
        elif ":" in call:
            path, *args = call.split(":")
        else:
            path = call
            args = ()
        modname, field = path.rsplit(".", 1)
        mod = __import__(modname, fromlist=[field])
        fn = getattr(mod, field)
        return fn, args
    return [_resolve_single(call.strip()) for call in spec.split(";")]
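
# Illustrative sketch (not from the source), resolving specs against the
# standard library: ":"-style arguments stay strings, while the
# parenthesized form is eval'd into a Python tuple.
fn, args = resolve_tracers("math.hypot(3, 4)")[0]
assert fn(*args) == 5.0
fn, args = resolve_tracers("math.hypot:3:4")[0]
assert args == ["3", "4"]  # strings, not numbers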

Python
def M(mg):
    """Create a variable that matches a Metagraph."""
    def chk(x):
        return x.is_constant_graph() and x.value.flags.get("metagraph") == mg
    return var(chk)

Python
def unfuse_composite(resources, node, equiv):
    """Transform array_map on a graph to a graph of array_maps.

    This must be applied to scalar-only graphs.
    """
    # This has to be defined inline because of circular imports
    class UnfuseRemapper(BasicRemapper):
        def __init__(self, g, reference):
            super().__init__(
                graphs=g.graphs_used.keys() | {g}, relation="unfused"
            )
            self.reference = reference

        def asarray(self, ng, i):
            if i.is_constant():
                typ = self.reference.abstract or ng.apply(
                    typeof, self.reference
                )
                return ng.apply(
                    P.distribute,
                    ng.apply(P.scalar_to_array, i, typ),
                    ng.apply(P.shape, self.reference),
                )
            else:
                return i

        def link_apply(self, link):
            ng = link.new_graph
            node = link.node
            assert node.inputs[0].is_constant(Primitive)
            ni = [self.asarray(ng, self.repl[i]) for i in node.inputs[1:]]
            link.new_node.inputs = [
                ng.constant(P.array_map),
                node.inputs[0],
            ] + ni

        def finalize_graph(self, g, ng):
            # This fails if we set .return_ instead of .output, not sure why.
            ng.output = self.repl[g.output]

    g = equiv[G].value
    xs = equiv[Xs]
    r = UnfuseRemapper(g, xs[0])
    r.run()
    ng = r.get_graph(g)
    for param, orig in zip(ng.parameters, xs):
        param.abstract = orig.abstract
    _set_out_abstract(ng, node.abstract)
    return node.graph.apply(ng, *xs)

Python
def simplify_array_map(resources, node, equiv):
    """Simplify array_map on certain graphs.

    If the graph cannot be eliminated, it is marked with the flag
    `inline_inside`, meaning that all calls within it must be inlined.

    Examples:
        array_map(lambda x, y: f(x, y), xs, ys)
            => array_map(f, xs, ys)
        array_map(lambda x, y: f(y, x), xs, ys)
            => array_map(f, ys, xs)
        array_map(lambda x, y: x, xs, ys)
            => xs
        array_map(lambda x: f(x, 3), xs)
            => array_map(f, xs, distribute(scalar_to_array(3), shape(xs)))
    """
    g = equiv[G].value
    xs = equiv[Xs]

    def to_outer(x):
        if x.is_parameter():
            idx = g.parameters.index(x)
            return xs[idx]
        elif x.is_constant() and issubclass(x.abstract.xtype(), Number):
            shp = (P.shape, xs[0])
            typ = xs[0].abstract or (typeof, xs[0])
            sexp = (P.distribute, (P.scalar_to_array, x, typ), shp)
            return sexp_to_node(sexp, node.graph)
        else:
            # Raise a semi-rare exception that won't hide bugs
            raise NotImplementedError()

    if len(g.scope) > 1:
        return node
    out = g.output
    try:
        if out.is_parameter() or out.is_constant():
            return to_outer(out)
        elif out.inputs[0].is_constant():
            args = [to_outer(arg) for arg in out.inputs[1:]]
            return node.graph.apply(P.array_map, out.inputs[0], *args)
        else:
            return node  # pragma: no cover
    except NotImplementedError:
        if g.has_flags("inline_inside"):
            return node
        else:
            g.set_flags("inline_inside")
            return True

Python
def force_constants(resources, node, equiv):
    """Replace nodes with a constant value if the value is in its type."""
    node = equiv[X]
    if (
        node.is_constant()
        or node.is_parameter()
        or node.graph
        and node is node.graph.return_
    ):
        return None
    try:
        val = build_value(node.abstract)
    except Exception:
        return None
    with untested():
        if val is DEAD:
            return None
    ct = Constant(val)
    ct.abstract = node.abstract
    return ct

Python
def replace_applicator(resources, node, equiv):
    """Replace a function that merely applies another function by that function.

    For example, `lambda x, y: f(x, y)` is replaced by f.

    The inner function must be applied on all the outer function's parameters
    in the exact same order, and it must be either a Primitive or a global
    function.
    """
    g = equiv[G].value
    out = g.output
    if out.is_apply() and out.inputs[1:] == g.parameters:
        inner = out.inputs[0]
        # NOTE: it is likely correct to use `inner.value.parent is not g` as
        # the condition instead of `is None`, the current code is just playing
        # it safe.
        if (
            inner.is_constant(Primitive)
            or inner.is_constant_graph()
            and inner.value.parent is None
        ):
            return inner
    return node

Python
def specialize_transform(graph, args):
    """Specialize on provided non-None args.

    Parameters that are specialized on are removed.
    """
    mng = graph.manager
    graph = transformable_clone(graph, relation="sp")
    mng.add_graph(graph)
    for p, arg in zip(graph.parameters, args):
        if arg is not None:
            mng.replace(p, Constant(arg))
    new_parameters = [
        p for p, arg in zip(graph.parameters, args) if arg is None
    ]
    mng.set_parameters(graph, new_parameters)
    return graph
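
# Illustrative note (not from the source): given a graph for f(x, y, z) and
# args=(None, 3, None), the clone keeps parameters x and z while every use
# of y is replaced by the constant 3, so the result behaves like
# f_sp(x, z) == f(x, 3, z).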

Python
def specialize_on_graph_arguments(resources, node, equiv):
    """Specialize a call on constant graph arguments."""
    g = equiv[G].value
    xs = equiv[Xs]
    specialize = [x.is_constant((Graph, Primitive)) for x in xs]
    if not any(specialize):
        return node
    specialize_map = tuple(
        x.value if s else None for x, s in zip(xs, specialize)
    )
    new_xs = [x for x, s in zip(xs, specialize) if not s]
    g2 = specialize_transform(g, specialize_map)
    return node.graph.apply(g2, *new_xs)

Python
def env_getitem_transform(orig_graph, key, default):
    """Map to a graph that incorporates a call to env_getitem."""
    rel = getattr(key, "node", key)
    graph = transformable_clone(orig_graph, relation=f"[{rel}]")
    out = graph.output
    while out.is_apply(P.env_setitem):
        _, out, key2, value = out.inputs
        if key == key2.value:
            graph.output = value
            break
    else:
        with untested():
            graph.output = graph.apply(P.env_getitem, out, key, default)
    graph.return_.abstract = key.abstract
    return graph

Python
def incorporate_env_getitem(resources, node, equiv):
    """Incorporate an env_getitem into a call."""
    g = equiv[G].value
    key = equiv[C].value
    dflt = equiv[Y]
    if check_used_once(g):
        return node.graph.apply(env_getitem_transform(g, key, dflt), *equiv[Xs])

Python
def call_output_transform(orig_graph, abstracts):
    """Map to a graph that calls its output.

    ((*args1) -> (*args2) -> f) => (*args1, *args2) -> f(*args2)
    """
    graph = transformable_clone(orig_graph, relation="call")
    newp = []
    for a in abstracts:
        assert a is not None
        p = graph.add_parameter()
        p.abstract = a
        newp.append(p)
    graph.output = graph.apply(graph.output, *newp)
    _set_out_abstract(graph, orig_graph.return_.abstract.output)
    return graph

Python
def incorporate_call(resources, node, equiv):
    """Incorporate a call into the graph that returns the function.

    Example:
        g(x)(y) => g2(x, y)

    Where g2 is a modified copy of g that incorporates the call on y.
    """
    g = equiv[G].value
    xs = equiv[Xs]
    ys = equiv[Ys]
    if check_used_once(g):
        g2 = call_output_transform(g, tuple(y.abstract for y in ys))
        return node.graph.apply(g2, *xs, *ys)

Python
def incorporate_call_through_switch(resources, node, equiv):
    """Incorporate a call to both branches.

    Example:
        switch(x, f, g)(y)(z)
            => switch(x, f2, g2)(y, z)

    Where f2 and g2 are modified copies of f and g that incorporate the
    call on both y and z.
    """
    g1 = equiv[G1].value
    g2 = equiv[G2].value
    xs = equiv[Xs]
    ys = equiv[Ys]
    if check_used_once(g1) and check_used_once(g2):
        g1t = call_output_transform(g1, tuple(y.abstract for y in ys))
        g2t = call_output_transform(g2, tuple(y.abstract for y in ys))
        new = ((P.switch, equiv[X], g1t, g2t), *xs, *ys)
        return sexp_to_node(new, node.graph)

Python
def has_inner_j_on_function(node):
    """Check whether the given node is a graph that itself contains a J applied to a sub-graph."""
    if isinstance(node, Graph):
        seen = {node}
        todo = [node.output]
        while todo:
            n = todo.pop()
            if n.inputs:
                for inp in n.inputs:
                    if inp.is_apply(P.J) and inp.inputs[1].is_constant(Graph):
                        return True
                    else:
                        todo.append(inp)
            elif n.is_constant_graph() and n.value not in seen:
                seen.add(n.value)
                todo.append(n.value.output)
    return False

Python
def opt_jelim(resources):
    """Eliminate J, iff it is only applied to non-functions."""
    mng = resources.opt_manager
    args = dict(opt=opt_jelim, node=None, manager=mng)
    with tracer("opt", **args) as tr:
        tr.set_results(success=False, **args)
        nodes = []
        typesubs = []
        for node in mng.all_nodes:
            try:
                newtype = _jelim_retype(node.abstract)
            except TypeError:
                return {"changes": False}
            if node.is_apply(P.J) or node.is_apply(P.Jinv):
                if not _jelim_nofunc(node.abstract):
                    return {"changes": False}
                _, x = node.inputs
                nodes.append((node, x))
            if newtype is not node.abstract:
                typesubs.append((node, newtype))
        with mng.transact() as tr:
            for node, repl in nodes:
                tr.replace(node, repl)
            for node, newtype in typesubs:
                node.abstract = newtype
        if len(nodes) > 0:
            tracer().emit_success(**args, new_node=None)
return {"changes": len(nodes) > 0} | def opt_jelim(resources):
"""Eliminate J, iff it is only applied to non-functions."""
mng = resources.opt_manager
args = dict(opt=opt_jelim, node=None, manager=mng)
with tracer("opt", **args) as tr:
tr.set_results(success=False, **args)
nodes = []
typesubs = []
for node in mng.all_nodes:
try:
newtype = _jelim_retype(node.abstract)
except TypeError:
return {"changes": False}
if node.is_apply(P.J) or node.is_apply(P.Jinv):
if not _jelim_nofunc(node.abstract):
return {"changes": False}
_, x = node.inputs
nodes.append((node, x))
if newtype is not node.abstract:
typesubs.append((node, newtype))
with mng.transact() as tr:
for node, repl in nodes:
tr.replace(node, repl)
for node, newtype in typesubs:
node.abstract = newtype
if len(nodes) > 0:
tracer().emit_success(**args, new_node=None)
return {"changes": len(nodes) > 0} |

Python
def map_tuples(self, g, params, tups):
    """Map each element of each tuple to a getitem on the parameter."""
    rval = []
    for tup, param in zip(tups, params):
        if not isinstance(tup, AbstractTuple):
            raise MyiaTypeError(f"Expected AbstractTuple, not {tup}")
        rval.append(
            [
                g.apply(P.tuple_getitem, param, i)
                for i, elem in enumerate(tup.elements)
            ]
        )
    return rval

Python
def tuple_getslice(self, g, args):
    """Metagraph for getting a slice from a tuple."""
    tuparg, start, stop, step = check_nargs("tail", 4, args)
    try:
        start = build_value(start)
        stop = build_value(stop)
        step = build_value(step)
    except ValueError:
        raise MyiaTypeError("Slice start, stop and step must be static")
    (tup,) = self.map_tuples(g, g.parameters[:1], [tuparg])
    return g.apply(P.make_tuple, *tup[start:stop:step])
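
# Illustrative note (not from the source): for a 5-element tuple t, a static
# slice t[0:5:2] is resolved at compile time into explicit getitems, i.e.
# make_tuple(tuple_getitem(t, 0), tuple_getitem(t, 2), tuple_getitem(t, 4)).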

Python
async def infer_scatter_add(
    self,
    engine,
    input: lib.AbstractArray,
    dim: xtype.UInt[64],
    index: lib.AbstractArray,
    src: lib.AbstractArray,
):
    """Infer the return type of primitive `scatter_add`."""
    return input

Python
def process_string(self, sequence, remove_repetitions=False):
    """
    Given a string, removes blanks and replaces the space character with a
    space. Option to remove repetitions (e.g. 'abbca' -> 'abca').
    Arguments:
        sequence (array of int): 1-d array of integers
        remove_repetitions (boolean, optional): If true, repeating characters
            are removed. Defaults to False.
    """
    string = ''
    for i, char in enumerate(sequence):
        if char != self.int_to_char[self.blank_index]:
            # if this char is a repetition and remove_repetitions=true, skip.
            if remove_repetitions and i != 0 and char == sequence[i - 1]:
                pass
            elif char == self.alphabet[self.space_index]:
                string = string + ' '
            else:
                string = string + char
    return string
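
# Standalone sketch (not from the source) of the same collapse logic with
# remove_repetitions=True, using '_' to stand in for the blank symbol:
def ctc_collapse(chars, blank='_'):
    out = []
    for i, ch in enumerate(chars):
        if ch == blank:
            continue
        if i > 0 and ch == chars[i - 1]:  # repeated character: keep one copy
            continue
        out.append(ch)
    return ''.join(out)

assert ctc_collapse("hh_e_ll_lo") == "hello"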

Python
def log_sum(self, list_of_probs):
    """
    Computes the sum of log-probabilities.
    Arguments:
        list_of_probs (iterable): list of log-probabilities
    """
    return np.log(np.sum([np.exp(p) for p in list_of_probs]))
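
# Illustrative check (not from the source): summing probabilities 0.5 and
# 0.25 in log space gives log(0.75). Note that the direct exp/sum/log used
# above can underflow for very negative log-probabilities;
# scipy.special.logsumexp is the numerically stable equivalent.
import numpy as np

probs = [np.log(0.5), np.log(0.25)]
total = np.log(np.sum([np.exp(p) for p in probs]))  # same computation as log_sum
assert np.isclose(total, np.log(0.75))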

Python
def wer(self, s1, s2):
    """
    Computes the Word Error Rate, defined as the edit distance between the
    two provided sentences after tokenizing to words.
    Arguments:
        s1 (string): space-separated sentence
        s2 (string): space-separated sentence
    """
    # build mapping of words to integers
    b = set(s1.split() + s2.split())
    word2char = dict(zip(b, range(len(b))))
    # map the words to a char array (the Levenshtein package only accepts
    # strings)
    w1 = [chr(word2char[w]) for w in s1.split()]
    w2 = [chr(word2char[w]) for w in s2.split()]
    return Lev.distance(''.join(w1), ''.join(w2))
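
# Standalone sketch (not from the source) of the word-to-char trick above,
# assuming the python-Levenshtein package (imported here as Levenshtein):
# each distinct word is encoded as a single character, so character edit
# distance on the encoded strings counts whole-word edits.
import Levenshtein as Lev

def word_distance(s1, s2):
    vocab = {w: i for i, w in enumerate(set(s1.split() + s2.split()))}
    def encode(s):
        return ''.join(chr(vocab[w]) for w in s.split())
    return Lev.distance(encode(s1), encode(s2))

assert word_distance("the cat sat", "the cat sit") == 1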

Python
def cer(self, s1, s2):
    """
    Computes the Character Error Rate, defined as the edit distance.
    Arguments:
        s1 (string): space-separated sentence
        s2 (string): space-separated sentence
    """
    return Lev.distance(s1, s2)

Python
def decode(self, probs):
    """
    Given a matrix of character probabilities, returns the decoder's
    best guess of the transcription.
    Arguments:
        probs (ndarray): Matrix of character probabilities, where probs[c,t]
            is the probability of character c at time t
    Returns:
        string: sequence of the model's best guess for the transcription
    """
    raise NotImplementedError

Python
def decode(self, probs):
    """
    Returns the argmax decoding given the probability matrix. Removes
    repeated elements in the sequence, as well as blanks.
    """
    string = self.convert_to_string(np.argmax(probs, axis=0))
    return self.process_string(string, remove_repetitions=True)

Python
def write_manifest(output_file, *filenames):
    """ Writes out a manifest file from a series of lists of filenames
    """
    with open(output_file, "w") as fid:
        fid.write("@{}\n".format("\t".join(["FILE"] * len(filenames))))
        for line in zip(*filenames):
            fid.write("\t".join(line) + "\n")
    return True
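
# Illustrative usage (not from the source): pairing two file lists produces
# a tab-separated manifest with an "@FILE\tFILE" header row:
#     write_manifest("train.tsv", ["a.flac", "b.flac"], ["a.txt", "b.txt"])
# train.tsv then contains:
#     @FILE    FILE
#     a.flac   a.txt
#     b.flac   b.txt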

Python
def main(input_directory, transcript_directory, manifest_file):
    """ Finds all .flac files recursively in input_directory, then extracts the
    transcript from the nearby .trans.txt file and stores it in
    transcript_directory. Writes a manifest file referring to each .flac file
    and its paired transcript.
    Arguments:
        input_directory (string): Path to librispeech directory
        transcript_directory (string): Path to directory in which to write
            individual transcript files.
        manifest_file (string): Path to manifest file to output.
    """
    def librispeech_flac_filename(filestr):
        parts = filestr.split("-")
        return os.path.join(input_directory, parts[0], parts[1],
                            "{}.flac".format(filestr))

    if not os.path.isdir(input_directory):
        raise IOError("Data directory does not exist! {}".format(input_directory))
    if not os.path.exists(transcript_directory):
        os.makedirs(transcript_directory)
    transcript_files = glob.glob(os.path.join(input_directory, '*/*/*.txt'))
    if len(transcript_files) == 0:
        logger.error("No .txt files were found in {}".format(input_directory))
        return
    logger.info("Beginning audio conversions")
    audio_files = list()
    txt_files = list()
    for ii, tfile in enumerate(transcript_files):
        # transcript file specifies transcript and flac filename for all
        # librispeech files
        logger.info("Converting audio corresponding to transcript "
                    "{} of {}".format(ii, len(transcript_files)))
        with open(tfile, "r") as fid:
            lines = fid.readlines()
        for line in lines:
            filestr, transcript = line.split(" ", 1)
            try:
                flac_file = librispeech_flac_filename(filestr)
            except IndexError:  # filestr is not the format we are expecting
                print("filestr of unexpected formatting: {}".format(filestr))
                print("error in {}".format(tfile))
                continue
            txt_file = os.path.join(transcript_directory,
                                    "{}.txt".format(filestr))
            # Write out short transcript file
            with open(txt_file, "w") as fid:
                fid.write(transcript.strip())
            # Add to output lists to be written to manifest
            audio_files.append(flac_file)
            txt_files.append(txt_file)
    logger.info("Writing manifest file to {}".format(manifest_file))
    return write_manifest(manifest_file, audio_files, txt_files)
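
# Note on the id format assumed by librispeech_flac_filename above:
# LibriSpeech utterance ids look like "1089-134686-0000"
# (speaker-chapter-utterance), so that id resolves to
# <input_directory>/1089/134686/1089-134686-0000.flac.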

Python
def wrap_dataloader(dl):
    """ Data is loaded from Aeon as a 4-tuple. We need to cast the audio
    (index 0) from int8 to float32 and repack the data into (audio, 3-tuple).
    """
    dl = TypeCast(dl, index=0, dtype=np.float32)
    dl = Retuple(dl, data=(0,), target=(2, 3, 1))
    return dl

Python
def cpu_ctc_np(acts, act_lens, labels, label_lens):
    """
    acts: 3-d numpy float array, same as c++ bindings
    act_lens: 1-d int array of input length of each example
    labels: list of 1-d int array for each example in minibatch
    label_lens: 1-d int array of label length of each example
    """
    # make sure correct types
    acts = np.array(acts, dtype=np.float32)
    act_lens = np.array(act_lens, dtype=np.int32)
    labels = np.array(labels, dtype=np.int32)
    label_lens = np.array(label_lens, dtype=np.int32)
    # C needs sizes
    alphabet_size = acts.shape[2]
    minibatch = acts.shape[1]
    # create return variables
    grads = np.zeros_like(acts, dtype=np.float32)
    cost = np.zeros((minibatch,), dtype=np.float32)
    # compute
    libwarpctc.cpu_ctc(acts, grads, labels, label_lens, act_lens, alphabet_size, minibatch, cost, 1)
    return cost, grads
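
# Shapes implied by the wrapper above: acts is (time, minibatch,
# alphabet_size) float32, act_lens is (minibatch,) int32 input lengths,
# label_lens gives each target sequence's length; cost comes back as
# (minibatch,) and grads has the same shape as acts.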

Python
def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
        end = 0
        start = end
        end += 8
        (self.num,) = _get_struct_q().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.first_name = str[start:end].decode('utf-8')
        else:
            self.first_name = str[start:end]
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.last_name = str[start:end].decode('utf-8')
        else:
            self.last_name = str[start:end]
        _x = self
        start = end
        end += 5
        (_x.age, _x.score,) = _get_struct_BI().unpack(str[start:end])
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e)
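
# Wire layout implied by the unpack calls above (little-endian): an int64
# num, two length-prefixed strings (uint32 byte length followed by UTF-8
# bytes each), then a packed uint8 age and uint32 score (the 5-byte struct).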

Python
def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
        buff.write(_get_struct_q().pack(self.num))
        _x = self.first_name
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        _x = self.last_name
        length = len(_x)
        if python3 or type(_x) == unicode:
            _x = _x.encode('utf-8')
            length = len(_x)
        buff.write(struct.pack('<I%ss'%length, length, _x))
        _x = self
        buff.write(_get_struct_BI().pack(_x.age, _x.score))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

Python
def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
        end = 0
        start = end
        end += 8
        (self.num,) = _get_struct_q().unpack(str[start:end])
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.first_name = str[start:end].decode('utf-8')
        else:
            self.first_name = str[start:end]
        start = end
        end += 4
        (length,) = _struct_I.unpack(str[start:end])
        start = end
        end += length
        if python3:
            self.last_name = str[start:end].decode('utf-8')
        else:
            self.last_name = str[start:end]
        _x = self
        start = end
        end += 5
        (_x.age, _x.score,) = _get_struct_BI().unpack(str[start:end])
        return self
    except struct.error as e:
        raise genpy.DeserializationError(e)

Python
def check_for_extrapolations(ane_heights, RSD_heights):
    """
    Check if columns are specified for other anemometer heights, and extract the column names.
    :param ane_heights: dictionary of height labels and values for anemometers
    :param RSD_heights: dictionary of height labels and values for RSD
    :return extrapolation_type: None or str to decide what type of extrapolation to perform
    """
    unique_ane_hts = set(ane_heights.values()).difference(set(['unknown']))
    unique_RSD_hts = set(RSD_heights.values()).difference(set(['unknown']))
    overlapping_hts = unique_ane_hts.intersection(unique_RSD_hts)
    extrapolation_type = None
    if len(unique_ane_hts) == 2 and (max(unique_RSD_hts) > max(unique_ane_hts)):
        extrapolation_type = 'simple'
    elif len(unique_ane_hts) > 2:
        # We still need at least two ane heights that are lower than one RSD height
        tmp = [sum([ht > a for a in unique_ane_hts]) >= 2 for ht in unique_RSD_hts]
        if any(tmp):
            extrapolation_type = 'simple'
        # Otherwise we can have two ane heights that are lower than one overlapping height
        tmp = [sum([ht > a for a in unique_ane_hts]) >= 2 for ht in overlapping_hts]
        if any(tmp):
            extrapolation_type = 'truth'
    return extrapolation_type
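
# Illustrative check (not from the source): with anemometers at 40/60/80 m
# and RSD data at 80/100 m, two anemometer levels sit below the overlapping
# 80 m level, so the stricter 'truth' extrapolation is selected.
ane = {'Ht1': 40, 'Ht2': 60, 'primary': 80}
rsd = {'primary': 80, 'Ht4': 100}
assert check_for_extrapolations(ane, rsd) == 'truth'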

Python
def perform_TI_extrapolation(inputdata, extrap_metadata, extrapolation_type, height):
    """
    Perform the TI extrapolation on anemometer data.
    :param inputdata: input data (dataframe)
    :param extrap_metadata: DataFrame with metadata required for TI extrapolation
    :param extrapolation_type: str to decide what type of extrapolation to perform
    :param height: Primary comparison height (m)
    :return inputdata: input data (dataframe) with additional columns
    Power law: v2 = v1 * (z2 / z1) ** alpha
    """
    # Calculate shear exponent
    shearTimeseries = get_shear_exponent(inputdata, extrap_metadata, height)
    # TI columns and heights
    row = extrap_metadata.loc[extrap_metadata['type'] == 'extrap', :].squeeze()
    col_extrap_ane, ht_extrap = get_extrap_col_and_ht(row['height'], row['num'], height,
                                                      sensor='Ane', var='TI')
    col_extrap_RSD, ht_extrap = get_extrap_col_and_ht(row['height'], row['num'], height,
                                                      sensor='RSD', var='TI')
    col_extrap_RSD_SD, ht_extrap = get_extrap_col_and_ht(row['height'], row['num'], height,
                                                         sensor='RSD', var='SD')
    col_extrap_ane_SD, ht_extrap = get_extrap_col_and_ht(row['height'], row['num'], height,
                                                         sensor='Ane', var='SD')
    # Select reference height just below extrapolation height
    hts = extrap_metadata.loc[extrap_metadata['height'] < ht_extrap, :]
    ref = hts.loc[hts['height'] == max(hts['height']), :].squeeze()
    col_ref, ht_ref = get_extrap_col_and_ht(ref['height'], ref['num'], height, 'Ane')
    col_ref_sd, ht_ref = get_extrap_col_and_ht(ref['height'], ref['num'], height, 'Ane', var='SD')
    # Extrapolate wind speed and st. dev. and calculate extrapolated TI
    WS_ane_extrap = power_law(inputdata[col_ref], ht_extrap, ht_ref, shearTimeseries)
    SD_ane_extrap = power_law(inputdata[col_ref_sd], ht_extrap, ht_ref, -shearTimeseries)
    TI_ane_extrap = SD_ane_extrap / WS_ane_extrap
    # Extract available TI values
    TI_RSD = inputdata[col_extrap_RSD].values
    SD_RSD = inputdata[col_extrap_RSD_SD].values
    if extrapolation_type == 'truth':
        TI_ane_truth = inputdata[col_extrap_ane].values
    # Insert new columns into DataFrame
    inputdata['TI_RSD'] = TI_RSD
    inputdata['TI_ane_extrap'] = TI_ane_extrap
    if extrapolation_type == 'truth':
        inputdata['TI_ane_truth'] = TI_ane_truth
    results = pd.DataFrame(columns=['sensor', 'height', 'correction', 'm',
                                    'c', 'rsquared', 'difference', 'mse', 'rmse'])
    results = post_correction_stats(inputdata, results, 'TI_RSD', 'TI_ane_extrap')
    if extrapolation_type == 'truth':
        # 'TI_ane_truth' is only available in the 'truth' case
        results = post_correction_stats(inputdata, results, 'TI_ane_truth', 'TI_RSD')
        results = post_correction_stats(inputdata, results, 'TI_ane_truth', 'TI_ane_extrap')
    return inputdata, results, shearTimeseries

Python
def power_law(uref, h, href, shear):
    """
    Extrapolate wind speed (or other) according to power law.
    NOTE: see https://en.wikipedia.org/wiki/Wind_profile_power_law
    :param uref: wind speed at reference height (same units as extrapolated wind speed, u)
    :param h: height of extrapolated wind speed (same units as href)
    :param href: reference height (same units as h)
    :param shear: shear exponent alpha (1/7 in neutral stability) (unitless)
    :return u: extrapolated wind speed (same units as uref)
    """
    u = np.array(uref) * np.array(h / href) ** np.array(shear)
    return u
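
# Illustrative check (not from the source): 8 m/s at 80 m extrapolated to
# 100 m with the neutral-stability exponent alpha = 1/7 gives roughly
# 8 * (100 / 80) ** (1 / 7) ~= 8.26 m/s.
import numpy as np

assert abs(power_law(8.0, 100.0, 80.0, 1 / 7) - 8.26) < 0.01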
Python | def perform_G_C_correction(inputdata):
'''
Note: comprehensive empirical correction from a dozen locations. Focuses on std. deviation
'''
results = pd.DataFrame(columns=['sensor', 'height', 'correction', 'm',
'c', 'rsquared', 'difference','mse', 'rmse'])
if inputdata.empty or len(inputdata) < 2:
results = post_correction_stats([None],results, 'Ref_TI','corrTI_RSD_TI')
if 'Ane_TI_Ht1' in inputdata.columns and 'RSD_TI_Ht1' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht1','corrTI_RSD_TI_Ht1')
if 'Ane_TI_Ht2' in inputdata.columns and 'RSD_TI_Ht2' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht2','corrTI_RSD_TI_Ht2')
if 'Ane_TI_Ht3' in inputdata.columns and 'RSD_TI_Ht3' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht3','corrTI_RSD_TI_Ht3')
if 'Ane_TI_Ht4' in inputdata.columns and 'RSD_TI_Ht4' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht4','corrTI_RSD_TI_Ht4')
m = np.NaN
c = np.NaN
inputdata_test = inputdata.copy()
inputdata = False
else:
        inputdata_test, results = empirical_stdAdjustment(inputdata, results, 'Ref_TI', 'RSD_TI', 'Ref_SD', 'RSD_SD', 'Ref_WS', 'RSD_WS')
        # Apply the same empirical std.-dev. adjustment at each available comparison height
        for ht in ('Ht1', 'Ht2', 'Ht3', 'Ht4'):
            if 'Ane_TI_' + ht in inputdata.columns and 'RSD_TI_' + ht in inputdata.columns:
                inputdata_test, results = empirical_stdAdjustment(
                    inputdata_test, results,
                    'Ane_TI_' + ht, 'RSD_TI_' + ht,
                    'Ane_SD_' + ht, 'RSD_SD_' + ht,
                    'Ane_WS_' + ht, 'RSD_WS_' + ht)
results['correction'] = ['G-C'] * len(results)
results = results.drop(columns=['sensor','height'])
m = np.NaN
c = np.NaN
    return inputdata_test, results, m, c
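A hedged call sketch for perform_G_C_correction: the column names follow this module's conventions, the values are random placeholders, and empirical_stdAdjustment / post_correction_stats must already be defined in scope.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({
    'Ref_TI': rng.uniform(0.05, 0.25, 50), 'RSD_TI': rng.uniform(0.05, 0.25, 50),
    'Ref_SD': rng.uniform(0.3, 1.2, 50),   'RSD_SD': rng.uniform(0.3, 1.2, 50),
    'Ref_WS': rng.uniform(4.0, 12.0, 50),  'RSD_WS': rng.uniform(4.0, 12.0, 50),
})
corrected, stats, m, c = perform_G_C_correction(df)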
Python | def perform_G_SFc_correction(inputdata):
'''
    Global simple filtered correction: applies fixed regression coefficients (m, c) taken from phase-2 averages.
'''
results = pd.DataFrame(columns=['sensor', 'height', 'correction', 'm',
'c', 'rsquared', 'difference','mse', 'rmse'])
m = 0.7086
c = 0.0225
if inputdata.empty or len(inputdata) < 2:
results = post_correction_stats([None],results, 'Ref_TI','corrTI_RSD_TI')
if 'Ane_TI_Ht1' in inputdata.columns and 'RSD_TI_Ht1' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht1','corrTI_RSD_TI_Ht1')
if 'Ane_TI_Ht2' in inputdata.columns and 'RSD_TI_Ht2' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht2','corrTI_RSD_TI_Ht2')
if 'Ane_TI_Ht3' in inputdata.columns and 'RSD_TI_Ht3' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht3','corrTI_RSD_TI_Ht3')
if 'Ane_TI_Ht4' in inputdata.columns and 'RSD_TI_Ht4' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht4','corrTI_RSD_TI_Ht4')
inputdata = False
else:
        # Apply the fixed linear map TI_corr = m*TI + c at each available height
        for ane_col, rsd_col, corr_col in (
                ('Ref_TI', 'RSD_TI', 'corrTI_RSD_TI'),
                ('Ane_TI_Ht1', 'RSD_TI_Ht1', 'corrTI_RSD_TI_Ht1'),
                ('Ane_TI_Ht2', 'RSD_TI_Ht2', 'corrTI_RSD_TI_Ht2'),
                ('Ane_TI_Ht3', 'RSD_TI_Ht3', 'corrTI_RSD_TI_Ht3'),
                ('Ane_TI_Ht4', 'RSD_TI_Ht4', 'corrTI_RSD_TI_Ht4')):
            if ane_col not in inputdata.columns or rsd_col not in inputdata.columns:
                continue
            full = inputdata[[ane_col, rsd_col]].dropna()
            if len(full) < 2:
                results = post_correction_stats([None], results, ane_col, corr_col)
            else:
                inputdata[corr_col] = (float(m) * inputdata[rsd_col].copy()) + float(c)
                results = post_correction_stats(inputdata, results, ane_col, corr_col)
    results['correction'] = ['G-SFc'] * len(results)
results = results.drop(columns=['sensor','height'])
    return inputdata, results, m, c
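Since G-SFc is a fixed affine map, its effect is easy to check by hand; for instance, a raw RSD TI of 0.20 corrects to 0.7086 * 0.20 + 0.0225 = 0.16422:

m, c = 0.7086, 0.0225
print(m * 0.20 + c)  # 0.16422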
Python | def perform_SS_SS_correction(inputdata,All_class_data,primary_idx):
'''
simple site specific correction, but adjust each TKE class differently
'''
results = pd.DataFrame(columns=['sensor', 'height', 'correction', 'm',
'c', 'rsquared', 'difference','mse', 'rmse'])
    className = 1
    items_corrected = []
    m = np.NaN
    c = np.NaN
for item in All_class_data:
temp = item[primary_idx]
if temp.empty:
pass
else:
inputdata_test = temp[temp['split'] == True].copy()
inputdata_train = temp[temp['split'] == False].copy()
            if inputdata_test.empty or len(inputdata_test) < 2 or inputdata_train.empty or len(inputdata_train) < 2:
                # not enough data in this class to fit a correction; keep the test rows uncorrected
                items_corrected.append(inputdata_test)
            else:
                # get the correction for this TKE class
full = pd.DataFrame()
full['Ref_TI'] = inputdata_test['Ref_TI']
full['RSD_TI'] = inputdata_test['RSD_TI']
full = full.dropna()
if len(full) < 2:
pass
else:
model = get_regression(inputdata_train['RSD_TI'], inputdata_train['Ref_TI'])
m = model[0]
c = model[1]
RSD_TI = inputdata_test['RSD_TI'].copy()
RSD_TI = (model[0]*RSD_TI) + model[1]
inputdata_test['corrTI_RSD_TI'] = RSD_TI
items_corrected.append(inputdata_test)
del temp
className += 1
correctedData = items_corrected[0]
for item in items_corrected[1:]:
correctedData = pd.concat([correctedData, item])
    results = post_correction_stats(correctedData, results, 'Ref_TI', 'corrTI_RSD_TI')
results['correction'] = ['SS-SS'] * len(results)
results = results.drop(columns=['sensor','height'])
    return correctedData, results, m, c
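For testing perform_SS_SS_correction in isolation, a hypothetical stand-in for get_regression is sketched below; it assumes only that the real helper returns the slope at index 0 and the intercept at index 1, which is all this module uses.

import numpy as np

def get_regression(x, y):
    # Least-squares line y = m*x + c; slope first, matching the model[0]/model[1] usage above
    slope, intercept = np.polyfit(np.asarray(x, dtype=float), np.asarray(y, dtype=float), 1)
    return [slope, intercept]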
Python | def perform_SS_S_correction(inputdata):
'''
Note: Representative TI computed with original RSD_SD
'''
results = pd.DataFrame(columns=['sensor', 'height', 'correction', 'm',
'c', 'rsquared', 'difference','mse', 'rmse'])
inputdata_train = inputdata[inputdata['split'] == True].copy()
inputdata_test = inputdata[inputdata['split'] == False].copy()
if inputdata.empty or len(inputdata) < 2:
results = post_correction_stats([None],results, 'Ref_TI','corrTI_RSD_TI')
if 'Ane_TI_Ht1' in inputdata.columns and 'RSD_TI_Ht1' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht1','corrTI_RSD_TI_Ht1')
if 'Ane_TI_Ht2' in inputdata.columns and 'RSD_TI_Ht2' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht2','corrTI_RSD_TI_Ht2')
if 'Ane_TI_Ht3' in inputdata.columns and 'RSD_TI_Ht3' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht3','corrTI_RSD_TI_Ht3')
if 'Ane_TI_Ht4' in inputdata.columns and 'RSD_TI_Ht4' in inputdata.columns:
results = post_correction_stats([None],results, 'Ane_TI_Ht4','corrTI_RSD_TI_Ht4')
m = np.NaN
c = np.NaN
inputdata = False
else:
full = pd.DataFrame()
full['Ref_TI'] = inputdata_test['Ref_TI']
full['RSD_TI'] = inputdata_test['RSD_TI']
full = full.dropna()
if len(full) < 2:
results = post_correction_stats([None],results, 'Ref_TI','corrTI_RSD_TI')
m = np.NaN
c = np.NaN
else:
model = get_regression(inputdata_train['RSD_TI'], inputdata_train['Ref_TI'])
m = model[0]
c = model[1]
RSD_TI = inputdata_test['RSD_TI'].copy()
RSD_TI = (model[0]*RSD_TI) + model[1]
inputdata_test['corrTI_RSD_TI'] = RSD_TI
results = post_correction_stats(inputdata_test,results, 'Ref_TI','corrTI_RSD_TI')
        # Repeat the regression correction at each available comparison height,
        # training on that height's own anemometer/RSD columns
        for ane_col, rsd_col, corr_col in (
                ('Ane_TI_Ht1', 'RSD_TI_Ht1', 'corrTI_RSD_TI_Ht1'),
                ('Ane_TI_Ht2', 'RSD_TI_Ht2', 'corrTI_RSD_TI_Ht2'),
                ('Ane_TI_Ht3', 'RSD_TI_Ht3', 'corrTI_RSD_TI_Ht3'),
                ('Ane_TI_Ht4', 'RSD_TI_Ht4', 'corrTI_RSD_TI_Ht4')):
            if ane_col not in inputdata.columns or rsd_col not in inputdata.columns:
                continue
            full = inputdata_test[[ane_col, rsd_col]].dropna()
            if len(full) < 2:
                results = post_correction_stats([None], results, ane_col, corr_col)
                m = np.NaN
                c = np.NaN
            else:
                model = get_regression(inputdata_train[rsd_col], inputdata_train[ane_col])
                inputdata_test[corr_col] = (model[0] * inputdata_test[rsd_col].copy()) + model[1]
                results = post_correction_stats(inputdata_test, results, ane_col, corr_col)
results['correction'] = ['SS-S'] * len(results)
results = results.drop(columns=['sensor','height'])
    return inputdata_test, results, m, c
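perform_SS_S_correction expects a boolean 'split' column marking training rows (True) versus test rows (False). A sketch of one way to build it, assuming an 80/20 random split (the project may assign it differently):

import numpy as np

rng = np.random.default_rng(0)
inputdata['split'] = rng.random(len(inputdata)) < 0.8  # True -> train, False -> test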
Python | def _log_ascend(data):
"""
Compute natural logarithm of x element-wise.
Args:
        data (tvm.tensor.Tensor): Tensor of type float16 or float32 (the dtype check below rejects integer types).
Returns:
tvm.tensor.Tensor of same type and shape as data
"""
in_data = data
dtype = in_data.dtype
utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)
if dtype == "float32" and product_is_mini():
in_data = akg.tvm.compute(in_data.shape, lambda *indice: in_data(*indice).astype("float16"), name='type_cast')
output = akg.tvm.compute(in_data.shape, lambda *index: akg.tvm.log(in_data(*index)), name='log')
if dtype == "float32" and product_is_mini():
output = akg.tvm.compute(in_data.shape, lambda *indice: output(*indice).astype("float32"), name='res')
    return output
"""
Compute natural logarithm of x element-wise.
Args:
data (tvm.tensor.Tensor): Tensor of type float16, float32, int8, uint8, int32.
Returns:
tvm.tensor.Tensor of same type and shape as data
"""
in_data = data
dtype = in_data.dtype
utils.ops_dtype_check(dtype, utils.DtypeForDavinci.ALL_FLOAT)
if dtype == "float32" and product_is_mini():
in_data = akg.tvm.compute(in_data.shape, lambda *indice: in_data(*indice).astype("float16"), name='type_cast')
output = akg.tvm.compute(in_data.shape, lambda *index: akg.tvm.log(in_data(*index)), name='log')
if dtype == "float32" and product_is_mini():
output = akg.tvm.compute(in_data.shape, lambda *indice: output(*indice).astype("float32"), name='res')
return output |
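On mini targets the float32 input is routed through float16, trading precision for hardware support; a plain-numpy illustration of the same cast pattern (numpy merely stands in for the akg.tvm compute graph here):

import numpy as np

x = np.array([1.0001, 10.5, 12345.6], dtype=np.float32)
log_via_fp16 = np.log(x.astype(np.float16)).astype(np.float32)
log_fp32 = np.log(x)
print(log_via_fp16 - log_fp32)  # small errors introduced by the 16-bit round-trip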